   1/*
   2 * Copyright 2014 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23
  24#include <linux/firmware.h>
  25#include <linux/module.h>
  26#include <linux/pci.h>
  27
  28#include <drm/drm_cache.h>
  29#include "amdgpu.h"
  30#include "gmc_v8_0.h"
  31#include "amdgpu_ucode.h"
  32#include "amdgpu_amdkfd.h"
  33#include "amdgpu_gem.h"
  34
  35#include "gmc/gmc_8_1_d.h"
  36#include "gmc/gmc_8_1_sh_mask.h"
  37
  38#include "bif/bif_5_0_d.h"
  39#include "bif/bif_5_0_sh_mask.h"
  40
  41#include "oss/oss_3_0_d.h"
  42#include "oss/oss_3_0_sh_mask.h"
  43
  44#include "dce/dce_10_0_d.h"
  45#include "dce/dce_10_0_sh_mask.h"
  46
  47#include "vid.h"
  48#include "vi.h"
  49
  50#include "amdgpu_atombios.h"
  51
  52#include "ivsrcid/ivsrcid_vislands30.h"
  53
  54static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
  55static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
  56static int gmc_v8_0_wait_for_idle(struct amdgpu_ip_block *ip_block);
  57
  58MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
  59MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
  60MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
  61MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
  62MODULE_FIRMWARE("amdgpu/polaris12_32_mc.bin");
  63MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
  64MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
  65MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
  66
  67static const u32 golden_settings_tonga_a11[] = {
  68	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
  69	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
  70	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
  71	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  72	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  73	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  74	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  75};
  76
  77static const u32 tonga_mgcg_cgcg_init[] = {
  78	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
  79};
  80
  81static const u32 golden_settings_fiji_a10[] = {
  82	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  83	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  84	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  85	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  86};
  87
  88static const u32 fiji_mgcg_cgcg_init[] = {
  89	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
  90};
  91
  92static const u32 golden_settings_polaris11_a11[] = {
  93	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  94	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  95	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  96	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
  97};
  98
  99static const u32 golden_settings_polaris10_a11[] = {
 100	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
 101	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
 102	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
 103	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
 104	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
 105};
 106
 107static const u32 cz_mgcg_cgcg_init[] = {
 108	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
 109};
 110
 111static const u32 stoney_mgcg_cgcg_init[] = {
 112	mmATC_MISC_CG, 0xffffffff, 0x000c0200,
 113	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
 114};
 115
 116static const u32 golden_settings_stoney_common[] = {
 117	mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
 118	mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
 119};
 120
 121static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
 122{
 123	switch (adev->asic_type) {
 124	case CHIP_FIJI:
 125		amdgpu_device_program_register_sequence(adev,
 126							fiji_mgcg_cgcg_init,
 127							ARRAY_SIZE(fiji_mgcg_cgcg_init));
 128		amdgpu_device_program_register_sequence(adev,
 129							golden_settings_fiji_a10,
 130							ARRAY_SIZE(golden_settings_fiji_a10));
 131		break;
 132	case CHIP_TONGA:
 133		amdgpu_device_program_register_sequence(adev,
 134							tonga_mgcg_cgcg_init,
 135							ARRAY_SIZE(tonga_mgcg_cgcg_init));
 136		amdgpu_device_program_register_sequence(adev,
 137							golden_settings_tonga_a11,
 138							ARRAY_SIZE(golden_settings_tonga_a11));
 139		break;
 140	case CHIP_POLARIS11:
 141	case CHIP_POLARIS12:
 142	case CHIP_VEGAM:
 143		amdgpu_device_program_register_sequence(adev,
 144							golden_settings_polaris11_a11,
 145							ARRAY_SIZE(golden_settings_polaris11_a11));
 146		break;
 147	case CHIP_POLARIS10:
 148		amdgpu_device_program_register_sequence(adev,
 149							golden_settings_polaris10_a11,
 150							ARRAY_SIZE(golden_settings_polaris10_a11));
 151		break;
 152	case CHIP_CARRIZO:
 153		amdgpu_device_program_register_sequence(adev,
 154							cz_mgcg_cgcg_init,
 155							ARRAY_SIZE(cz_mgcg_cgcg_init));
 156		break;
 157	case CHIP_STONEY:
 158		amdgpu_device_program_register_sequence(adev,
 159							stoney_mgcg_cgcg_init,
 160							ARRAY_SIZE(stoney_mgcg_cgcg_init));
 161		amdgpu_device_program_register_sequence(adev,
 162							golden_settings_stoney_common,
 163							ARRAY_SIZE(golden_settings_stoney_common));
 164		break;
 165	default:
 166		break;
 167	}
 168}
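/*
 * Illustrative note (not part of the original driver): the golden
 * register tables above are consumed three u32s at a time as
 * { register, AND mask, OR value } triples.  Per triple,
 * amdgpu_device_program_register_sequence() does approximately:
 *
 *	if (and_mask == 0xffffffff)
 *		tmp = or_value;
 *	else
 *		tmp = (RREG32(reg) & ~and_mask) | (or_value & and_mask);
 *	WREG32(reg, tmp);
 *
 * so { mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 } simply overwrites
 * MC_MEM_POWER_LS with 0x104, while a partial mask only replaces the
 * masked bits.
 */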
 169
 170static void gmc_v8_0_mc_stop(struct amdgpu_device *adev)
 171{
 172	u32 blackout;
 173	struct amdgpu_ip_block *ip_block;
 174
 175	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
 176	if (!ip_block)
 177		return;
 178
 179	gmc_v8_0_wait_for_idle(ip_block);
 180
 181	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
 182	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
 183		/* Block CPU access */
 184		WREG32(mmBIF_FB_EN, 0);
 185		/* blackout the MC */
 186		blackout = REG_SET_FIELD(blackout,
 187					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
 188		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
 189	}
 190	/* wait for the MC to settle */
 191	udelay(100);
 192}
 193
 194static void gmc_v8_0_mc_resume(struct amdgpu_device *adev)
 195{
 196	u32 tmp;
 197
 198	/* unblackout the MC */
 199	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
 200	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
 201	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
 202	/* allow CPU access */
 203	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
 204	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
 205	WREG32(mmBIF_FB_EN, tmp);
 206}
 207
 208/**
 209 * gmc_v8_0_init_microcode - load ucode images from disk
 210 *
 211 * @adev: amdgpu_device pointer
 212 *
 213 * Use the firmware interface to load the ucode images into
 214 * the driver (not loaded into hw).
 215 * Returns 0 on success, error on failure.
 216 */
 217static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
 218{
 219	const char *chip_name;
 220	int err;
 221
 222	DRM_DEBUG("\n");
 223
 224	switch (adev->asic_type) {
 225	case CHIP_TONGA:
 226		chip_name = "tonga";
 227		break;
 228	case CHIP_POLARIS11:
 229		if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) ||
 230		    ASICID_IS_P31(adev->pdev->device, adev->pdev->revision))
 231			chip_name = "polaris11_k";
 232		else
 233			chip_name = "polaris11";
 234		break;
 235	case CHIP_POLARIS10:
 236		if (ASICID_IS_P30(adev->pdev->device, adev->pdev->revision))
 237			chip_name = "polaris10_k";
 238		else
 239			chip_name = "polaris10";
 240		break;
 241	case CHIP_POLARIS12:
 242		if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) {
 243			chip_name = "polaris12_k";
 244		} else {
 245			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, ixMC_IO_DEBUG_UP_159);
 246			/* Polaris12 32bit ASIC needs a special MC firmware */
 247			if (RREG32(mmMC_SEQ_IO_DEBUG_DATA) == 0x05b4dc40)
 248				chip_name = "polaris12_32";
 249			else
 250				chip_name = "polaris12";
 251		}
 252		break;
 253	case CHIP_FIJI:
 254	case CHIP_CARRIZO:
 255	case CHIP_STONEY:
 256	case CHIP_VEGAM:
 257		return 0;
 258	default:
 259		return -EINVAL;
 260	}
 261
 262	err = amdgpu_ucode_request(adev, &adev->gmc.fw, "amdgpu/%s_mc.bin", chip_name);
 263	if (err) {
 264		pr_err("mc: Failed to load firmware \"%s_mc.bin\"\n", chip_name);
 265		amdgpu_ucode_release(&adev->gmc.fw);
 266	}
 267	return err;
 268}
 269
 270/**
 271 * gmc_v8_0_tonga_mc_load_microcode - load tonga MC ucode into the hw
 272 *
 273 * @adev: amdgpu_device pointer
 274 *
 275 * Load the GDDR MC ucode into the hw (VI).
 276 * Returns 0 on success, error on failure.
 277 */
 278static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
 279{
 280	const struct mc_firmware_header_v1_0 *hdr;
 281	const __le32 *fw_data = NULL;
 282	const __le32 *io_mc_regs = NULL;
 283	u32 running;
 284	int i, ucode_size, regs_size;
 285
  286	/* Skip MC ucode loading on SR-IOV capable boards;
  287	 * the vbios does this for us in asic_init in that case.
  288	 * Likewise skip it on a VF, because the hypervisor will do
  289	 * it for this adapter.
  290	 */
 291	if (amdgpu_sriov_bios(adev))
 292		return 0;
 293
 294	if (!adev->gmc.fw)
 295		return -EINVAL;
 296
 297	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
 298	amdgpu_ucode_print_mc_hdr(&hdr->header);
 299
 300	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
 301	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
 302	io_mc_regs = (const __le32 *)
 303		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
 304	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 305	fw_data = (const __le32 *)
 306		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 307
 308	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
 309
 310	if (running == 0) {
 311		/* reset the engine and set to writable */
 312		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
 313		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
 314
 315		/* load mc io regs */
 316		for (i = 0; i < regs_size; i++) {
 317			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
 318			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
 319		}
 320		/* load the MC ucode */
 321		for (i = 0; i < ucode_size; i++)
 322			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));
 323
 324		/* put the engine back into the active state */
 325		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
 326		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
 327		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
 328
 329		/* wait for training to complete */
 330		for (i = 0; i < adev->usec_timeout; i++) {
 331			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
 332					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
 333				break;
 334			udelay(1);
 335		}
 336		for (i = 0; i < adev->usec_timeout; i++) {
 337			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
 338					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
 339				break;
 340			udelay(1);
 341		}
 342	}
 343
 344	return 0;
 345}
 346
 347static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
 348{
 349	const struct mc_firmware_header_v1_0 *hdr;
 350	const __le32 *fw_data = NULL;
 351	const __le32 *io_mc_regs = NULL;
 352	u32 data;
 353	int i, ucode_size, regs_size;
 354
  355	/* Skip MC ucode loading on SR-IOV capable boards;
  356	 * the vbios does this for us in asic_init in that case.
  357	 * Likewise skip it on a VF, because the hypervisor will do
  358	 * it for this adapter.
  359	 */
 360	if (amdgpu_sriov_bios(adev))
 361		return 0;
 362
 363	if (!adev->gmc.fw)
 364		return -EINVAL;
 365
 366	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
 367	amdgpu_ucode_print_mc_hdr(&hdr->header);
 368
 369	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
 370	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
 371	io_mc_regs = (const __le32 *)
 372		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
 373	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 374	fw_data = (const __le32 *)
 375		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 376
 377	data = RREG32(mmMC_SEQ_MISC0);
 378	data &= ~(0x40);
 379	WREG32(mmMC_SEQ_MISC0, data);
 380
 381	/* load mc io regs */
 382	for (i = 0; i < regs_size; i++) {
 383		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
 384		WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
 385	}
 386
 387	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
 388	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
 389
 390	/* load the MC ucode */
 391	for (i = 0; i < ucode_size; i++)
 392		WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));
 393
 394	/* put the engine back into the active state */
 395	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
 396	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
 397	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
 398
 399	/* wait for training to complete */
 400	for (i = 0; i < adev->usec_timeout; i++) {
 401		data = RREG32(mmMC_SEQ_MISC0);
 402		if (data & 0x80)
 403			break;
 404		udelay(1);
 405	}
 406
 407	return 0;
 408}
 409
 410static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
 411				       struct amdgpu_gmc *mc)
 412{
 413	u64 base = 0;
 414
 415	if (!amdgpu_sriov_vf(adev))
 416		base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
 417	base <<= 24;
 418
 419	amdgpu_gmc_set_agp_default(adev, mc);
 420	amdgpu_gmc_vram_location(adev, mc, base);
 421	amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
 422}
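/*
 * Worked example (illustrative, not in the original source):
 * MC_VM_FB_LOCATION holds the framebuffer base in its low 16 bits in
 * units of 16 MB, hence the shift by 24 to get a byte address:
 *
 *	RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF == 0x0010
 *	=> base = 0x0010ULL << 24 = 0x10000000	(VRAM base at 256 MB)
 */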
 423
 424/**
 425 * gmc_v8_0_mc_program - program the GPU memory controller
 426 *
 427 * @adev: amdgpu_device pointer
 428 *
 429 * Set the location of vram, gart, and AGP in the GPU's
 430 * physical address space (VI).
 431 */
 432static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
 433{
 434	struct amdgpu_ip_block *ip_block;
 435	u32 tmp;
 436	int i, j;
 437
 438	/* Initialize HDP */
 439	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
 440		WREG32((0xb05 + j), 0x00000000);
 441		WREG32((0xb06 + j), 0x00000000);
 442		WREG32((0xb07 + j), 0x00000000);
 443		WREG32((0xb08 + j), 0x00000000);
 444		WREG32((0xb09 + j), 0x00000000);
 445	}
 446	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
 447
 448	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
 449	if (!ip_block)
 450		return;
 451
 452	if (gmc_v8_0_wait_for_idle(ip_block))
  453		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
 454
 455	if (adev->mode_info.num_crtc) {
 456		/* Lockout access through VGA aperture*/
 457		tmp = RREG32(mmVGA_HDP_CONTROL);
 458		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
 459		WREG32(mmVGA_HDP_CONTROL, tmp);
 460
 461		/* disable VGA render */
 462		tmp = RREG32(mmVGA_RENDER_CONTROL);
 463		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
 464		WREG32(mmVGA_RENDER_CONTROL, tmp);
 465	}
 466	/* Update configuration */
 467	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
 468	       adev->gmc.vram_start >> 12);
 469	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
 470	       adev->gmc.vram_end >> 12);
 471	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
 472	       adev->mem_scratch.gpu_addr >> 12);
 473
 474	if (amdgpu_sriov_vf(adev)) {
 475		tmp = ((adev->gmc.vram_end >> 24) & 0xFFFF) << 16;
 476		tmp |= ((adev->gmc.vram_start >> 24) & 0xFFFF);
 477		WREG32(mmMC_VM_FB_LOCATION, tmp);
 478		/* XXX double check these! */
 479		WREG32(mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
 480		WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
 481		WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
 482	}
 483
 484	WREG32(mmMC_VM_AGP_BASE, 0);
 485	WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22);
 486	WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22);
 487	if (gmc_v8_0_wait_for_idle(ip_block))
  488		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
 489
 490	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
 491
 492	tmp = RREG32(mmHDP_MISC_CNTL);
 493	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
 494	WREG32(mmHDP_MISC_CNTL, tmp);
 495
 496	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
 497	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
 498}
 499
 500/**
 501 * gmc_v8_0_mc_init - initialize the memory controller driver params
 502 *
 503 * @adev: amdgpu_device pointer
 504 *
 505 * Look up the amount of vram, vram width, and decide how to place
 506 * vram and gart within the GPU's physical address space (VI).
 507 * Returns 0 for success.
 508 */
 509static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
 510{
 511	int r;
 512	u32 tmp;
 513
 514	adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
 515	if (!adev->gmc.vram_width) {
 516		int chansize, numchan;
 517
  518		/* Get VRAM information */
 519		tmp = RREG32(mmMC_ARB_RAMCFG);
 520		if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE))
 521			chansize = 64;
 522		else
 523			chansize = 32;
 524
 525		tmp = RREG32(mmMC_SHARED_CHMAP);
 526		switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
 527		case 0:
 528		default:
 529			numchan = 1;
 530			break;
 531		case 1:
 532			numchan = 2;
 533			break;
 534		case 2:
 535			numchan = 4;
 536			break;
 537		case 3:
 538			numchan = 8;
 539			break;
 540		case 4:
 541			numchan = 3;
 542			break;
 543		case 5:
 544			numchan = 6;
 545			break;
 546		case 6:
 547			numchan = 10;
 548			break;
 549		case 7:
 550			numchan = 12;
 551			break;
 552		case 8:
 553			numchan = 16;
 554			break;
 555		}
 556		adev->gmc.vram_width = numchan * chansize;
 557	}
 558	/* size in MB on si */
 559	tmp = RREG32(mmCONFIG_MEMSIZE);
 560	/* some boards may have garbage in the upper 16 bits */
 561	if (tmp & 0xffff0000) {
 562		DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
 563		if (tmp & 0xffff)
 564			tmp &= 0xffff;
 565	}
 566	adev->gmc.mc_vram_size = tmp * 1024ULL * 1024ULL;
 567	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
 568
 569	if (!(adev->flags & AMD_IS_APU)) {
 570		r = amdgpu_device_resize_fb_bar(adev);
 571		if (r)
 572			return r;
 573	}
 574	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
 575	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 576
 577#ifdef CONFIG_X86_64
 578	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
 579		adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
 580		adev->gmc.aper_size = adev->gmc.real_vram_size;
 581	}
 582#endif
 583
 584	adev->gmc.visible_vram_size = adev->gmc.aper_size;
 585
 586	/* set the gart size */
 587	if (amdgpu_gart_size == -1) {
 588		switch (adev->asic_type) {
 589		case CHIP_POLARIS10: /* all engines support GPUVM */
 590		case CHIP_POLARIS11: /* all engines support GPUVM */
 591		case CHIP_POLARIS12: /* all engines support GPUVM */
 592		case CHIP_VEGAM:     /* all engines support GPUVM */
 593		default:
 594			adev->gmc.gart_size = 256ULL << 20;
 595			break;
 596		case CHIP_TONGA:   /* UVD, VCE do not support GPUVM */
 597		case CHIP_FIJI:    /* UVD, VCE do not support GPUVM */
 598		case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */
 599		case CHIP_STONEY:  /* UVD does not support GPUVM, DCE SG support */
 600			adev->gmc.gart_size = 1024ULL << 20;
 601			break;
 602		}
 603	} else {
 604		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
 605	}
 606
 607	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
 608	gmc_v8_0_vram_gtt_location(adev, &adev->gmc);
 609
 610	return 0;
 611}
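/*
 * Worked example (illustrative, not in the original source): with
 * MC_SHARED_CHMAP.NOOFCHAN == 3 and MC_ARB_RAMCFG.CHANSIZE set, the
 * code above yields
 *
 *	numchan = 8, chansize = 64  =>  vram_width = 512 bits
 *
 * and a CONFIG_MEMSIZE readback of 0x1000 (4096 MB) gives
 *
 *	mc_vram_size = 4096ULL * 1024 * 1024 = 4 GiB
 */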
 612
 613/**
 614 * gmc_v8_0_flush_gpu_tlb_pasid - tlb flush via pasid
 615 *
 616 * @adev: amdgpu_device pointer
  617 * @pasid: pasid to be flushed
 618 * @flush_type: type of flush
 619 * @all_hub: flush all hubs
 620 * @inst: is used to select which instance of KIQ to use for the invalidation
 621 *
 622 * Flush the TLB for the requested pasid.
 623 */
 624static void gmc_v8_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 625					 uint16_t pasid, uint32_t flush_type,
 626					 bool all_hub, uint32_t inst)
 627{
 628	u32 mask = 0x0;
 629	int vmid;
 630
 631	for (vmid = 1; vmid < 16; vmid++) {
 632		u32 tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
 633
 634		if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
 635		    (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid)
 636			mask |= 1 << vmid;
 637	}
 638
 639	WREG32(mmVM_INVALIDATE_REQUEST, mask);
 640	RREG32(mmVM_INVALIDATE_RESPONSE);
 641}
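/*
 * Illustrative example (not in the original source): if the PASID is
 * currently mapped into VMIDs 3 and 7, the loop above builds
 *
 *	mask = (1 << 3) | (1 << 7) = 0x88
 *
 * and the single VM_INVALIDATE_REQUEST write flushes both VMIDs at
 * once; the VM_INVALIDATE_RESPONSE readback presumably just flushes
 * the posted write.
 */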
 642
 643/*
 644 * GART
 645 * VMID 0 is the physical GPU addresses as used by the kernel.
 646 * VMIDs 1-15 are used for userspace clients and are handled
 647 * by the amdgpu vm/hsa code.
 648 */
 649
 650/**
 651 * gmc_v8_0_flush_gpu_tlb - gart tlb flush callback
 652 *
 653 * @adev: amdgpu_device pointer
 654 * @vmid: vm instance to flush
 655 * @vmhub: which hub to flush
 656 * @flush_type: type of flush
 657 *
 658 * Flush the TLB for the requested page table (VI).
 659 */
 660static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 661					uint32_t vmhub, uint32_t flush_type)
 662{
 663	/* bits 0-15 are the VM contexts0-15 */
 664	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 665}
 666
 667static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 668					    unsigned int vmid, uint64_t pd_addr)
 669{
 670	uint32_t reg;
 671
 672	if (vmid < 8)
 673		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
 674	else
 675		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
 676	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
 677
 678	/* bits 0-15 are the VM contexts0-15 */
 679	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
 680
 681	return pd_addr;
 682}
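/*
 * Illustrative example (not in the original source): for vmid == 5
 * the function above emits the ring-buffer equivalent of
 *
 *	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + 5, pd_addr >> 12);
 *	WREG32(mmVM_INVALIDATE_REQUEST, 1 << 5);
 *
 * i.e. it repoints that VMID's page directory and then invalidates
 * its TLB entries from within the command stream.
 */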
 683
 684static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
 685					unsigned int pasid)
 686{
 687	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
 688}
 689
 690/*
 691 * PTE format on VI:
 692 * 63:40 reserved
 693 * 39:12 4k physical page base address
 694 * 11:7 fragment
 695 * 6 write
 696 * 5 read
 697 * 4 exe
 698 * 3 reserved
 699 * 2 snooped
 700 * 1 system
 701 * 0 valid
 702 *
 703 * PDE format on VI:
 704 * 63:59 block fragment size
 705 * 58:40 reserved
 706 * 39:1 physical base address of PTE
 707 * bits 5:1 must be 0.
 708 * 0 valid
 709 */
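/*
 * Worked example (illustrative, not part of the original comment):
 * under the PTE layout above, a valid, readable and writable mapping
 * of the 4k page at physical address 0x123456000 would be
 *
 *	pte  = 0x123456000ULL;	bits 39:12, already 4k aligned
 *	pte |= 1ULL << 0;	valid
 *	pte |= 1ULL << 5;	read
 *	pte |= 1ULL << 6;	write
 *	=> pte == 0x123456061
 */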
 710
 711static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
 712				uint64_t *addr, uint64_t *flags)
 713{
 714	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
 715}
 716
 717static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev,
 718				struct amdgpu_bo_va_mapping *mapping,
 719				uint64_t *flags)
 720{
 721	*flags &= ~AMDGPU_PTE_EXECUTABLE;
 722	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
 723	*flags &= ~AMDGPU_PTE_PRT;
 724}
 725
 726/**
 727 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 728 *
 729 * @adev: amdgpu_device pointer
 730 * @value: true redirects VM faults to the default page
 731 */
 732static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
 733					      bool value)
 734{
 735	u32 tmp;
 736
 737	tmp = RREG32(mmVM_CONTEXT1_CNTL);
 738	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 739			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 740	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 741			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 742	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 743			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 744	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 745			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 746	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 747			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 748	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 749			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 750	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 751			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 752	WREG32(mmVM_CONTEXT1_CNTL, tmp);
 753}
 754
 755/**
 756 * gmc_v8_0_set_prt() - set PRT VM fault
 757 *
 758 * @adev: amdgpu_device pointer
 759 * @enable: enable/disable VM fault handling for PRT
 760 */
 761static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
 762{
 763	u32 tmp;
 764
 765	if (enable && !adev->gmc.prt_warning) {
 766		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
 767		adev->gmc.prt_warning = true;
 768	}
 769
 770	tmp = RREG32(mmVM_PRT_CNTL);
 771	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 772			    CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
 773	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 774			    CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
 775	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 776			    TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
 777	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 778			    TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
 779	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 780			    L2_CACHE_STORE_INVALID_ENTRIES, enable);
 781	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 782			    L1_TLB_STORE_INVALID_ENTRIES, enable);
 783	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 784			    MASK_PDE0_FAULT, enable);
 785	WREG32(mmVM_PRT_CNTL, tmp);
 786
 787	if (enable) {
 788		uint32_t low = AMDGPU_VA_RESERVED_BOTTOM >>
 789			AMDGPU_GPU_PAGE_SHIFT;
 790		uint32_t high = adev->vm_manager.max_pfn -
 791			(AMDGPU_VA_RESERVED_TOP >> AMDGPU_GPU_PAGE_SHIFT);
 792
 793		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
 794		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
 795		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
 796		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
 797		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
 798		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
 799		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
 800		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
 801	} else {
 802		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
 803		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
 804		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
 805		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
 806		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
 807		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
 808		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
 809		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
 810	}
 811}
 812
 813/**
 814 * gmc_v8_0_gart_enable - gart enable
 815 *
 816 * @adev: amdgpu_device pointer
 817 *
 818 * This sets up the TLBs, programs the page tables for VMID0,
 819 * sets up the hw for VMIDs 1-15 which are allocated on
 820 * demand, and sets up the global locations for the LDS, GDS,
 821 * and GPUVM for FSA64 clients (VI).
 822 * Returns 0 for success, errors for failure.
 823 */
 824static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 825{
 826	uint64_t table_addr;
 827	u32 tmp, field;
 828	int i;
 829
 830	if (adev->gart.bo == NULL) {
 831		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
 832		return -EINVAL;
 833	}
 834	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
 835	table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
 836
 837	/* Setup TLB control */
 838	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
 839	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
 840	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
 841	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
 842	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
 843	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
 844	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
 845	/* Setup L2 cache */
 846	tmp = RREG32(mmVM_L2_CNTL);
 847	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
 848	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
 849	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
 850	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
 851	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
 852	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
 853	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
 854	WREG32(mmVM_L2_CNTL, tmp);
 855	tmp = RREG32(mmVM_L2_CNTL2);
 856	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
 857	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
 858	WREG32(mmVM_L2_CNTL2, tmp);
 859
 860	field = adev->vm_manager.fragment_size;
 861	tmp = RREG32(mmVM_L2_CNTL3);
 862	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
 863	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
 864	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
 865	WREG32(mmVM_L2_CNTL3, tmp);
 866	/* XXX: set to enable PTE/PDE in system memory */
 867	tmp = RREG32(mmVM_L2_CNTL4);
 868	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
 869	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
 870	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
 871	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
 872	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
 873	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
 874	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
 875	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
 876	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
 877	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
 878	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
 879	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
 880	WREG32(mmVM_L2_CNTL4, tmp);
 881	/* setup context0 */
 882	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
 883	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
 884	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
 885	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 886			(u32)(adev->dummy_page_addr >> 12));
 887	WREG32(mmVM_CONTEXT0_CNTL2, 0);
 888	tmp = RREG32(mmVM_CONTEXT0_CNTL);
 889	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
 890	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
 891	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 892	WREG32(mmVM_CONTEXT0_CNTL, tmp);
 893
 894	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
 895	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
 896	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);
 897
 898	/* empty context1-15 */
 899	/* FIXME start with 4G, once using 2 level pt switch to full
 900	 * vm size space
 901	 */
 902	/* set vm size, must be a multiple of 4 */
 903	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
 904	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
 905	for (i = 1; i < AMDGPU_NUM_VMID; i++) {
 906		if (i < 8)
 907			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
 908			       table_addr >> 12);
 909		else
 910			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
 911			       table_addr >> 12);
 912	}
 913
 914	/* enable context1-15 */
 915	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
 916	       (u32)(adev->dummy_page_addr >> 12));
 917	WREG32(mmVM_CONTEXT1_CNTL2, 4);
 918	tmp = RREG32(mmVM_CONTEXT1_CNTL);
 919	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
 920	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
 921	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 922	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 923	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 924	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 925	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 926	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 927	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 928	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
 929			    adev->vm_manager.block_size - 9);
 930	WREG32(mmVM_CONTEXT1_CNTL, tmp);
 931	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
 932		gmc_v8_0_set_fault_enable_default(adev, false);
 933	else
 934		gmc_v8_0_set_fault_enable_default(adev, true);
 935
 936	gmc_v8_0_flush_gpu_tlb(adev, 0, 0, 0);
 937	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
 938		 (unsigned int)(adev->gmc.gart_size >> 20),
 939		 (unsigned long long)table_addr);
 940	return 0;
 941}
 942
 943static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
 944{
 945	int r;
 946
 947	if (adev->gart.bo) {
 948		WARN(1, "R600 PCIE GART already initialized\n");
 949		return 0;
 950	}
 951	/* Initialize common gart structure */
 952	r = amdgpu_gart_init(adev);
 953	if (r)
 954		return r;
 955	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
 956	adev->gart.gart_pte_flags = AMDGPU_PTE_EXECUTABLE;
 957	return amdgpu_gart_table_vram_alloc(adev);
 958}
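/*
 * Sizing example (illustrative, assuming 4k GPU pages): each GART
 * entry is 8 bytes (table_size = num_gpu_pages * 8 above), so the
 * default 256 MB GART chosen for Polaris parts needs
 *
 *	256 MB / 4 KB = 65536 PTEs * 8 bytes = 512 KB of VRAM
 *
 * for the page table itself.
 */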
 959
 960/**
 961 * gmc_v8_0_gart_disable - gart disable
 962 *
 963 * @adev: amdgpu_device pointer
 964 *
  965 * This disables all VM page tables (VI).
 966 */
 967static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
 968{
 969	u32 tmp;
 970
 971	/* Disable all tables */
 972	WREG32(mmVM_CONTEXT0_CNTL, 0);
 973	WREG32(mmVM_CONTEXT1_CNTL, 0);
 974	/* Setup TLB control */
 975	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
 976	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
 977	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
 978	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
 979	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
 980	/* Setup L2 cache */
 981	tmp = RREG32(mmVM_L2_CNTL);
 982	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
 983	WREG32(mmVM_L2_CNTL, tmp);
 984	WREG32(mmVM_L2_CNTL2, 0);
 985}
 986
 987/**
 988 * gmc_v8_0_vm_decode_fault - print human readable fault info
 989 *
 990 * @adev: amdgpu_device pointer
 991 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 992 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 993 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 994 * @pasid: debug logging only - no functional use
 995 *
 996 * Print human readable fault information (VI).
 997 */
 998static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
 999				     u32 addr, u32 mc_client, unsigned int pasid)
1000{
1001	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
1002	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
1003					PROTECTIONS);
1004	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
1005		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
1006	u32 mc_id;
1007
1008	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
1009			      MEMORY_CLIENT_ID);
1010
1011	dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
1012	       protections, vmid, pasid, addr,
1013	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
1014			     MEMORY_CLIENT_RW) ?
1015	       "write" : "read", block, mc_client, mc_id);
1016}
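/*
 * Illustrative example (not in the original source): MCCLIENT packs a
 * four-character client tag, unpacked byte by byte above.  A raw
 * value of 0x43423000 decodes as
 *
 *	block = { 'C', 'B', '0', '\0', '\0' }	i.e. client "CB0"
 */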
1017
1018static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
1019{
1020	switch (mc_seq_vram_type) {
1021	case MC_SEQ_MISC0__MT__GDDR1:
1022		return AMDGPU_VRAM_TYPE_GDDR1;
1023	case MC_SEQ_MISC0__MT__DDR2:
1024		return AMDGPU_VRAM_TYPE_DDR2;
1025	case MC_SEQ_MISC0__MT__GDDR3:
1026		return AMDGPU_VRAM_TYPE_GDDR3;
1027	case MC_SEQ_MISC0__MT__GDDR4:
1028		return AMDGPU_VRAM_TYPE_GDDR4;
1029	case MC_SEQ_MISC0__MT__GDDR5:
1030		return AMDGPU_VRAM_TYPE_GDDR5;
1031	case MC_SEQ_MISC0__MT__HBM:
1032		return AMDGPU_VRAM_TYPE_HBM;
1033	case MC_SEQ_MISC0__MT__DDR3:
1034		return AMDGPU_VRAM_TYPE_DDR3;
1035	default:
1036		return AMDGPU_VRAM_TYPE_UNKNOWN;
1037	}
1038}
1039
1040static int gmc_v8_0_early_init(struct amdgpu_ip_block *ip_block)
1041{
1042	struct amdgpu_device *adev = ip_block->adev;
1043
1044	gmc_v8_0_set_gmc_funcs(adev);
1045	gmc_v8_0_set_irq_funcs(adev);
1046
1047	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
1048	adev->gmc.shared_aperture_end =
1049		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
1050	adev->gmc.private_aperture_start =
1051		adev->gmc.shared_aperture_end + 1;
1052	adev->gmc.private_aperture_end =
1053		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
1054	adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;
1055
1056	return 0;
1057}
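/*
 * Aperture arithmetic (illustrative, not in the original source):
 * with a 4 GB span per aperture the assignments above resolve to
 *
 *	shared:  0x2000000000000000 .. 0x20000000FFFFFFFF
 *	private: 0x2000000100000000 .. 0x20000001FFFFFFFF
 */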
1058
1059static int gmc_v8_0_late_init(struct amdgpu_ip_block *ip_block)
1060{
1061	struct amdgpu_device *adev = ip_block->adev;
1062
1063	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
1064		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
1065	else
1066		return 0;
1067}
1068
1069static unsigned int gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
1070{
1071	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
1072	unsigned int size;
1073
1074	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
1075		size = AMDGPU_VBIOS_VGA_ALLOCATION;
1076	} else {
1077		u32 viewport = RREG32(mmVIEWPORT_SIZE);
1078
1079		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
1080			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
1081			4);
1082	}
1083
1084	return size;
1085}
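/*
 * Worked example (illustrative, not in the original source): with VGA
 * mode disabled and a 1920x1080 viewport left behind by the vbios,
 * the reservation computed above is
 *
 *	1920 * 1080 * 4 = 8294400 bytes (~7.9 MB)
 *
 * i.e. one 32bpp scanout buffer covering the firmware framebuffer.
 */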
1086
1087#define mmMC_SEQ_MISC0_FIJI 0xA71
1088
1089static int gmc_v8_0_sw_init(struct amdgpu_ip_block *ip_block)
1090{
1091	int r;
1092	struct amdgpu_device *adev = ip_block->adev;
1093
1094	set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
1095
1096	if (adev->flags & AMD_IS_APU) {
1097		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
1098	} else {
1099		u32 tmp;
1100
1101		if ((adev->asic_type == CHIP_FIJI) ||
1102		    (adev->asic_type == CHIP_VEGAM))
1103			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
1104		else
1105			tmp = RREG32(mmMC_SEQ_MISC0);
1106		tmp &= MC_SEQ_MISC0__MT__MASK;
1107		adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
1108	}
1109
1110	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
1111	if (r)
1112		return r;
1113
1114	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
1115	if (r)
1116		return r;
1117
1118	/* Adjust VM size here.
1119	 * Currently set to 4GB ((1 << 20) 4k pages).
1120	 * Max GPUVM size for cayman and SI is 40 bits.
1121	 */
1122	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);
1123
1124	/* Set the internal MC address mask
1125	 * This is the max address of the GPU's
1126	 * internal address space.
1127	 */
1128	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
1129
1130	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
1131	if (r) {
1132		pr_warn("No suitable DMA available\n");
1133		return r;
1134	}
1135	adev->need_swiotlb = drm_need_swiotlb(40);
1136
1137	r = gmc_v8_0_init_microcode(adev);
1138	if (r) {
1139		DRM_ERROR("Failed to load mc firmware!\n");
1140		return r;
1141	}
1142
1143	r = gmc_v8_0_mc_init(adev);
1144	if (r)
1145		return r;
1146
1147	amdgpu_gmc_get_vbios_allocations(adev);
1148
1149	/* Memory manager */
1150	r = amdgpu_bo_init(adev);
1151	if (r)
1152		return r;
1153
1154	r = gmc_v8_0_gart_init(adev);
1155	if (r)
1156		return r;
1157
1158	/*
1159	 * number of VMs
1160	 * VMID 0 is reserved for System
1161	 * amdgpu graphics/compute will use VMIDs 1-7
1162	 * amdkfd will use VMIDs 8-15
1163	 */
1164	adev->vm_manager.first_kfd_vmid = 8;
1165	amdgpu_vm_manager_init(adev);
1166
1167	/* base offset of vram pages */
1168	if (adev->flags & AMD_IS_APU) {
1169		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
1170
1171		tmp <<= 22;
1172		adev->vm_manager.vram_base_offset = tmp;
1173	} else {
1174		adev->vm_manager.vram_base_offset = 0;
1175	}
1176
1177	adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
1178					GFP_KERNEL);
1179	if (!adev->gmc.vm_fault_info)
1180		return -ENOMEM;
1181	atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1182
1183	return 0;
1184}
1185
1186static int gmc_v8_0_sw_fini(struct amdgpu_ip_block *ip_block)
1187{
1188	struct amdgpu_device *adev = ip_block->adev;
1189
1190	amdgpu_gem_force_release(adev);
1191	amdgpu_vm_manager_fini(adev);
1192	kfree(adev->gmc.vm_fault_info);
1193	amdgpu_gart_table_vram_free(adev);
1194	amdgpu_bo_fini(adev);
1195	amdgpu_ucode_release(&adev->gmc.fw);
1196
1197	return 0;
1198}
1199
1200static int gmc_v8_0_hw_init(struct amdgpu_ip_block *ip_block)
1201{
1202	int r;
1203	struct amdgpu_device *adev = ip_block->adev;
1204
1205	gmc_v8_0_init_golden_registers(adev);
1206
1207	gmc_v8_0_mc_program(adev);
1208
1209	if (adev->asic_type == CHIP_TONGA) {
1210		r = gmc_v8_0_tonga_mc_load_microcode(adev);
1211		if (r) {
1212			DRM_ERROR("Failed to load MC firmware!\n");
1213			return r;
1214		}
1215	} else if (adev->asic_type == CHIP_POLARIS11 ||
1216			adev->asic_type == CHIP_POLARIS10 ||
1217			adev->asic_type == CHIP_POLARIS12) {
1218		r = gmc_v8_0_polaris_mc_load_microcode(adev);
1219		if (r) {
1220			DRM_ERROR("Failed to load MC firmware!\n");
1221			return r;
1222		}
1223	}
1224
1225	r = gmc_v8_0_gart_enable(adev);
1226	if (r)
1227		return r;
1228
1229	if (amdgpu_emu_mode == 1)
1230		return amdgpu_gmc_vram_checking(adev);
1231
1232	return 0;
1233}
1234
1235static int gmc_v8_0_hw_fini(struct amdgpu_ip_block *ip_block)
1236{
1237	struct amdgpu_device *adev = ip_block->adev;
1238
1239	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
1240	gmc_v8_0_gart_disable(adev);
1241
1242	return 0;
1243}
1244
1245static int gmc_v8_0_suspend(struct amdgpu_ip_block *ip_block)
1246{
1247	gmc_v8_0_hw_fini(ip_block);
1248
1249	return 0;
1250}
1251
1252static int gmc_v8_0_resume(struct amdgpu_ip_block *ip_block)
1253{
1254	int r;
1255
1256	r = gmc_v8_0_hw_init(ip_block);
1257	if (r)
1258		return r;
1259
1260	amdgpu_vmid_reset_all(ip_block->adev);
1261
1262	return 0;
1263}
1264
1265static bool gmc_v8_0_is_idle(void *handle)
1266{
1267	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1268	u32 tmp = RREG32(mmSRBM_STATUS);
1269
1270	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1271		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
1272		return false;
1273
1274	return true;
1275}
1276
1277static int gmc_v8_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
1278{
1279	unsigned int i;
1280	u32 tmp;
1281	struct amdgpu_device *adev = ip_block->adev;
1282
1283	for (i = 0; i < adev->usec_timeout; i++) {
1284		/* read MC_STATUS */
1285		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
1286					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1287					       SRBM_STATUS__MCC_BUSY_MASK |
1288					       SRBM_STATUS__MCD_BUSY_MASK |
1289					       SRBM_STATUS__VMC_BUSY_MASK |
1290					       SRBM_STATUS__VMC1_BUSY_MASK);
1291		if (!tmp)
1292			return 0;
1293		udelay(1);
1294	}
1295	return -ETIMEDOUT;
1296
1297}
1298
1299static bool gmc_v8_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
1300{
1301	u32 srbm_soft_reset = 0;
1302	struct amdgpu_device *adev = ip_block->adev;
1303	u32 tmp = RREG32(mmSRBM_STATUS);
1304
1305	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
1306		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1307						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
1308
1309	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1310		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
1311		if (!(adev->flags & AMD_IS_APU))
1312			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1313							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
1314	}
1315
1316	if (srbm_soft_reset) {
1317		adev->gmc.srbm_soft_reset = srbm_soft_reset;
1318		return true;
1319	}
1320
1321	adev->gmc.srbm_soft_reset = 0;
1322
1323	return false;
1324}
1325
1326static int gmc_v8_0_pre_soft_reset(struct amdgpu_ip_block *ip_block)
1327{
1328	struct amdgpu_device *adev = ip_block->adev;
1329
1330	if (!adev->gmc.srbm_soft_reset)
1331		return 0;
1332
1333	gmc_v8_0_mc_stop(adev);
1334	if (gmc_v8_0_wait_for_idle(ip_block))
 1335		dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
1336
1337	return 0;
1338}
1339
1340static int gmc_v8_0_soft_reset(struct amdgpu_ip_block *ip_block)
1341{
1342	struct amdgpu_device *adev = ip_block->adev;
1343	u32 srbm_soft_reset;
1344
1345	if (!adev->gmc.srbm_soft_reset)
1346		return 0;
1347	srbm_soft_reset = adev->gmc.srbm_soft_reset;
1348
1349	if (srbm_soft_reset) {
1350		u32 tmp;
1351
1352		tmp = RREG32(mmSRBM_SOFT_RESET);
1353		tmp |= srbm_soft_reset;
1354		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1355		WREG32(mmSRBM_SOFT_RESET, tmp);
1356		tmp = RREG32(mmSRBM_SOFT_RESET);
1357
1358		udelay(50);
1359
1360		tmp &= ~srbm_soft_reset;
1361		WREG32(mmSRBM_SOFT_RESET, tmp);
1362		tmp = RREG32(mmSRBM_SOFT_RESET);
1363
1364		/* Wait a little for things to settle down */
1365		udelay(50);
1366	}
1367
1368	return 0;
1369}
1370
1371static int gmc_v8_0_post_soft_reset(struct amdgpu_ip_block *ip_block)
1372{
1373	struct amdgpu_device *adev = ip_block->adev;
1374
1375	if (!adev->gmc.srbm_soft_reset)
1376		return 0;
1377
1378	gmc_v8_0_mc_resume(adev);
1379	return 0;
1380}
1381
1382static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
1383					     struct amdgpu_irq_src *src,
1384					     unsigned int type,
1385					     enum amdgpu_interrupt_state state)
1386{
1387	u32 tmp;
1388	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1389		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1390		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1391		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1392		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1393		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1394		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
1395
1396	switch (state) {
1397	case AMDGPU_IRQ_STATE_DISABLE:
1398		/* system context */
1399		tmp = RREG32(mmVM_CONTEXT0_CNTL);
1400		tmp &= ~bits;
1401		WREG32(mmVM_CONTEXT0_CNTL, tmp);
1402		/* VMs */
1403		tmp = RREG32(mmVM_CONTEXT1_CNTL);
1404		tmp &= ~bits;
1405		WREG32(mmVM_CONTEXT1_CNTL, tmp);
1406		break;
1407	case AMDGPU_IRQ_STATE_ENABLE:
1408		/* system context */
1409		tmp = RREG32(mmVM_CONTEXT0_CNTL);
1410		tmp |= bits;
1411		WREG32(mmVM_CONTEXT0_CNTL, tmp);
1412		/* VMs */
1413		tmp = RREG32(mmVM_CONTEXT1_CNTL);
1414		tmp |= bits;
1415		WREG32(mmVM_CONTEXT1_CNTL, tmp);
1416		break;
1417	default:
1418		break;
1419	}
1420
1421	return 0;
1422}
1423
1424static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
1425				      struct amdgpu_irq_src *source,
1426				      struct amdgpu_iv_entry *entry)
1427{
1428	u32 addr, status, mc_client, vmid;
1429
1430	if (amdgpu_sriov_vf(adev)) {
1431		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
1432			entry->src_id, entry->src_data[0]);
1433		dev_err(adev->dev, " Can't decode VM fault info here on SRIOV VF\n");
1434		return 0;
1435	}
1436
1437	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
1438	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
1439	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
1440	/* reset addr and status */
1441	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
1442
1443	if (!addr && !status)
1444		return 0;
1445
1446	amdgpu_vm_update_fault_cache(adev, entry->pasid,
1447				     ((u64)addr) << AMDGPU_GPU_PAGE_SHIFT, status, AMDGPU_GFXHUB(0));
1448
1449	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
1450		gmc_v8_0_set_fault_enable_default(adev, false);
1451
1452	if (printk_ratelimit()) {
1453		struct amdgpu_task_info *task_info;
1454
1455		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
1456			entry->src_id, entry->src_data[0]);
1457
1458		task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
1459		if (task_info) {
1460			dev_err(adev->dev, " for process %s pid %d thread %s pid %d\n",
1461				task_info->process_name, task_info->tgid,
1462				task_info->task_name, task_info->pid);
1463			amdgpu_vm_put_task_info(task_info);
1464		}
1465
1466		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
1467				addr);
1468		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1469			status);
1470
1471		gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client,
1472					 entry->pasid);
1473	}
1474
1475	vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
1476			     VMID);
1477	if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
1478		&& !atomic_read(&adev->gmc.vm_fault_info_updated)) {
1479		struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
1480		u32 protections = REG_GET_FIELD(status,
1481					VM_CONTEXT1_PROTECTION_FAULT_STATUS,
1482					PROTECTIONS);
1483
1484		info->vmid = vmid;
1485		info->mc_id = REG_GET_FIELD(status,
1486					    VM_CONTEXT1_PROTECTION_FAULT_STATUS,
1487					    MEMORY_CLIENT_ID);
1488		info->status = status;
1489		info->page_addr = addr;
1490		info->prot_valid = protections & 0x7 ? true : false;
1491		info->prot_read = protections & 0x8 ? true : false;
1492		info->prot_write = protections & 0x10 ? true : false;
1493		info->prot_exec = protections & 0x20 ? true : false;
1494		mb();
1495		atomic_set(&adev->gmc.vm_fault_info_updated, 1);
1496	}
1497
1498	return 0;
1499}
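/*
 * Decoding note (illustrative, derived from the masks used above):
 * the PROTECTIONS field is interpreted as
 *
 *	protections & 0x7	range/dummy-page/PDE0 (validity) faults
 *	protections & 0x8	read protection fault
 *	protections & 0x10	write protection fault
 *	protections & 0x20	exec protection fault
 *
 * e.g. a value of 0x18 reports a combined read and write fault.
 */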
1500
1501static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
1502						     bool enable)
1503{
1504	uint32_t data;
1505
1506	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
1507		data = RREG32(mmMC_HUB_MISC_HUB_CG);
1508		data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
1509		WREG32(mmMC_HUB_MISC_HUB_CG, data);
1510
1511		data = RREG32(mmMC_HUB_MISC_SIP_CG);
1512		data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
1513		WREG32(mmMC_HUB_MISC_SIP_CG, data);
1514
1515		data = RREG32(mmMC_HUB_MISC_VM_CG);
1516		data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
1517		WREG32(mmMC_HUB_MISC_VM_CG, data);
1518
1519		data = RREG32(mmMC_XPB_CLK_GAT);
1520		data |= MC_XPB_CLK_GAT__ENABLE_MASK;
1521		WREG32(mmMC_XPB_CLK_GAT, data);
1522
1523		data = RREG32(mmATC_MISC_CG);
1524		data |= ATC_MISC_CG__ENABLE_MASK;
1525		WREG32(mmATC_MISC_CG, data);
1526
1527		data = RREG32(mmMC_CITF_MISC_WR_CG);
1528		data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
1529		WREG32(mmMC_CITF_MISC_WR_CG, data);
1530
1531		data = RREG32(mmMC_CITF_MISC_RD_CG);
1532		data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
1533		WREG32(mmMC_CITF_MISC_RD_CG, data);
1534
1535		data = RREG32(mmMC_CITF_MISC_VM_CG);
1536		data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
1537		WREG32(mmMC_CITF_MISC_VM_CG, data);
1538
1539		data = RREG32(mmVM_L2_CG);
1540		data |= VM_L2_CG__ENABLE_MASK;
1541		WREG32(mmVM_L2_CG, data);
1542	} else {
1543		data = RREG32(mmMC_HUB_MISC_HUB_CG);
1544		data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
1545		WREG32(mmMC_HUB_MISC_HUB_CG, data);
1546
1547		data = RREG32(mmMC_HUB_MISC_SIP_CG);
1548		data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
1549		WREG32(mmMC_HUB_MISC_SIP_CG, data);
1550
1551		data = RREG32(mmMC_HUB_MISC_VM_CG);
1552		data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
1553		WREG32(mmMC_HUB_MISC_VM_CG, data);
1554
1555		data = RREG32(mmMC_XPB_CLK_GAT);
1556		data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
1557		WREG32(mmMC_XPB_CLK_GAT, data);
1558
1559		data = RREG32(mmATC_MISC_CG);
1560		data &= ~ATC_MISC_CG__ENABLE_MASK;
1561		WREG32(mmATC_MISC_CG, data);
1562
1563		data = RREG32(mmMC_CITF_MISC_WR_CG);
1564		data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
1565		WREG32(mmMC_CITF_MISC_WR_CG, data);
1566
1567		data = RREG32(mmMC_CITF_MISC_RD_CG);
1568		data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
1569		WREG32(mmMC_CITF_MISC_RD_CG, data);
1570
1571		data = RREG32(mmMC_CITF_MISC_VM_CG);
1572		data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
1573		WREG32(mmMC_CITF_MISC_VM_CG, data);
1574
1575		data = RREG32(mmVM_L2_CG);
1576		data &= ~VM_L2_CG__ENABLE_MASK;
1577		WREG32(mmVM_L2_CG, data);
1578	}
1579}
1580
1581static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
1582				       bool enable)
1583{
1584	uint32_t data;
1585
1586	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
1587		data = RREG32(mmMC_HUB_MISC_HUB_CG);
1588		data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
1589		WREG32(mmMC_HUB_MISC_HUB_CG, data);
1590
1591		data = RREG32(mmMC_HUB_MISC_SIP_CG);
1592		data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
1593		WREG32(mmMC_HUB_MISC_SIP_CG, data);
1594
1595		data = RREG32(mmMC_HUB_MISC_VM_CG);
1596		data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1597		WREG32(mmMC_HUB_MISC_VM_CG, data);
1598
1599		data = RREG32(mmMC_XPB_CLK_GAT);
1600		data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
1601		WREG32(mmMC_XPB_CLK_GAT, data);
1602
1603		data = RREG32(mmATC_MISC_CG);
1604		data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
1605		WREG32(mmATC_MISC_CG, data);
1606
1607		data = RREG32(mmMC_CITF_MISC_WR_CG);
1608		data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
1609		WREG32(mmMC_CITF_MISC_WR_CG, data);
1610
1611		data = RREG32(mmMC_CITF_MISC_RD_CG);
1612		data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
1613		WREG32(mmMC_CITF_MISC_RD_CG, data);
1614
1615		data = RREG32(mmMC_CITF_MISC_VM_CG);
1616		data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1617		WREG32(mmMC_CITF_MISC_VM_CG, data);
1618
1619		data = RREG32(mmVM_L2_CG);
1620		data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
1621		WREG32(mmVM_L2_CG, data);
1622	} else {
1623		data = RREG32(mmMC_HUB_MISC_HUB_CG);
1624		data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
1625		WREG32(mmMC_HUB_MISC_HUB_CG, data);
1626
1627		data = RREG32(mmMC_HUB_MISC_SIP_CG);
1628		data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
1629		WREG32(mmMC_HUB_MISC_SIP_CG, data);
1630
1631		data = RREG32(mmMC_HUB_MISC_VM_CG);
1632		data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1633		WREG32(mmMC_HUB_MISC_VM_CG, data);
1634
1635		data = RREG32(mmMC_XPB_CLK_GAT);
1636		data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
1637		WREG32(mmMC_XPB_CLK_GAT, data);
1638
1639		data = RREG32(mmATC_MISC_CG);
1640		data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
1641		WREG32(mmATC_MISC_CG, data);
1642
1643		data = RREG32(mmMC_CITF_MISC_WR_CG);
1644		data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
1645		WREG32(mmMC_CITF_MISC_WR_CG, data);
1646
1647		data = RREG32(mmMC_CITF_MISC_RD_CG);
1648		data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
1649		WREG32(mmMC_CITF_MISC_RD_CG, data);
1650
1651		data = RREG32(mmMC_CITF_MISC_VM_CG);
1652		data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1653		WREG32(mmMC_CITF_MISC_VM_CG, data);
1654
1655		data = RREG32(mmVM_L2_CG);
1656		data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
1657		WREG32(mmVM_L2_CG, data);
1658	}
1659}
1660
1661static int gmc_v8_0_set_clockgating_state(void *handle,
1662					  enum amd_clockgating_state state)
1663{
1664	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1665
1666	if (amdgpu_sriov_vf(adev))
1667		return 0;
1668
1669	switch (adev->asic_type) {
1670	case CHIP_FIJI:
1671		fiji_update_mc_medium_grain_clock_gating(adev,
1672				state == AMD_CG_STATE_GATE);
1673		fiji_update_mc_light_sleep(adev,
1674				state == AMD_CG_STATE_GATE);
1675		break;
1676	default:
1677		break;
1678	}
1679	return 0;
1680}
1681
1682static int gmc_v8_0_set_powergating_state(void *handle,
1683					  enum amd_powergating_state state)
1684{
1685	return 0;
1686}
1687
1688static void gmc_v8_0_get_clockgating_state(void *handle, u64 *flags)
1689{
1690	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1691	int data;
1692
1693	if (amdgpu_sriov_vf(adev))
1694		*flags = 0;
1695
1696	/* AMD_CG_SUPPORT_MC_MGCG */
1697	data = RREG32(mmMC_HUB_MISC_HUB_CG);
1698	if (data & MC_HUB_MISC_HUB_CG__ENABLE_MASK)
1699		*flags |= AMD_CG_SUPPORT_MC_MGCG;
1700
1701	/* AMD_CG_SUPPORT_MC_LS */
1702	if (data & MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK)
1703		*flags |= AMD_CG_SUPPORT_MC_LS;
1704}
1705
1706static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
1707	.name = "gmc_v8_0",
1708	.early_init = gmc_v8_0_early_init,
1709	.late_init = gmc_v8_0_late_init,
1710	.sw_init = gmc_v8_0_sw_init,
1711	.sw_fini = gmc_v8_0_sw_fini,
1712	.hw_init = gmc_v8_0_hw_init,
1713	.hw_fini = gmc_v8_0_hw_fini,
1714	.suspend = gmc_v8_0_suspend,
1715	.resume = gmc_v8_0_resume,
1716	.is_idle = gmc_v8_0_is_idle,
1717	.wait_for_idle = gmc_v8_0_wait_for_idle,
1718	.check_soft_reset = gmc_v8_0_check_soft_reset,
1719	.pre_soft_reset = gmc_v8_0_pre_soft_reset,
1720	.soft_reset = gmc_v8_0_soft_reset,
1721	.post_soft_reset = gmc_v8_0_post_soft_reset,
1722	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
1723	.set_powergating_state = gmc_v8_0_set_powergating_state,
1724	.get_clockgating_state = gmc_v8_0_get_clockgating_state,
1725};
1726
1727static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
1728	.flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
1729	.flush_gpu_tlb_pasid = gmc_v8_0_flush_gpu_tlb_pasid,
1730	.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
1731	.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
1732	.set_prt = gmc_v8_0_set_prt,
1733	.get_vm_pde = gmc_v8_0_get_vm_pde,
1734	.get_vm_pte = gmc_v8_0_get_vm_pte,
1735	.get_vbios_fb_size = gmc_v8_0_get_vbios_fb_size,
1736};
1737
1738static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
1739	.set = gmc_v8_0_vm_fault_interrupt_state,
1740	.process = gmc_v8_0_process_interrupt,
1741};
1742
1743static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
1744{
1745	adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
1746}
1747
1748static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
1749{
1750	adev->gmc.vm_fault.num_types = 1;
1751	adev->gmc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
1752}
1753
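/*
 * Several VI variants expose the GMC as different IP revisions
 * (8.0, 8.1, 8.5), but all of them share the single implementation in
 * this file; only the version numbers below differ.
 */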
1754const struct amdgpu_ip_block_version gmc_v8_0_ip_block = {
1755	.type = AMD_IP_BLOCK_TYPE_GMC,
1756	.major = 8,
1757	.minor = 0,
1758	.rev = 0,
1759	.funcs = &gmc_v8_0_ip_funcs,
1760};
1761
1762const struct amdgpu_ip_block_version gmc_v8_1_ip_block = {
1763	.type = AMD_IP_BLOCK_TYPE_GMC,
1764	.major = 8,
1765	.minor = 1,
1766	.rev = 0,
1767	.funcs = &gmc_v8_0_ip_funcs,
1768};
1769
1770const struct amdgpu_ip_block_version gmc_v8_5_ip_block = {
1771	.type = AMD_IP_BLOCK_TYPE_GMC,
1772	.major = 8,
1773	.minor = 5,
1774	.rev = 0,
1775	.funcs = &gmc_v8_0_ip_funcs,
1776};
v4.17
   1/*
   2 * Copyright 2014 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23#include <linux/firmware.h>
  24#include <drm/drmP.h>
  25#include <drm/drm_cache.h>
  26#include "amdgpu.h"
  27#include "gmc_v8_0.h"
  28#include "amdgpu_ucode.h"
  29
  30#include "gmc/gmc_8_1_d.h"
  31#include "gmc/gmc_8_1_sh_mask.h"
  32
  33#include "bif/bif_5_0_d.h"
  34#include "bif/bif_5_0_sh_mask.h"
  35
  36#include "oss/oss_3_0_d.h"
  37#include "oss/oss_3_0_sh_mask.h"
  38
  39#include "dce/dce_10_0_d.h"
  40#include "dce/dce_10_0_sh_mask.h"
  41
  42#include "vid.h"
  43#include "vi.h"
  44
  45#include "amdgpu_atombios.h"
  46
  47
  48static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
  49static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
  50static int gmc_v8_0_wait_for_idle(void *handle);
  51
  52MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
  53MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
  54MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
  55MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
  56
  57static const u32 golden_settings_tonga_a11[] =
  58{
  59	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
  60	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
  61	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
  62	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  63	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  64	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  65	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  66};
  67
  68static const u32 tonga_mgcg_cgcg_init[] =
  69{
  70	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
  71};
  72
  73static const u32 golden_settings_fiji_a10[] =
  74{
  75	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  76	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  77	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  78	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  79};
  80
  81static const u32 fiji_mgcg_cgcg_init[] =
  82{
  83	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
  84};
  85
  86static const u32 golden_settings_polaris11_a11[] =
  87{
  88	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  89	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  90	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  91	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
  92};
  93
  94static const u32 golden_settings_polaris10_a11[] =
  95{
  96	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
  97	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  98	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  99	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
 100	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
 101};
 102
 103static const u32 cz_mgcg_cgcg_init[] =
 104{
 105	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
 106};
 107
 108static const u32 stoney_mgcg_cgcg_init[] =
 109{
 110	mmATC_MISC_CG, 0xffffffff, 0x000c0200,
 111	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
 112};
 113
 114static const u32 golden_settings_stoney_common[] =
 115{
 116	mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
 117	mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
 118};
 119
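/*
 * Each golden settings table above is a flat list of
 * (register, and_mask, or_mask) triplets.
 * amdgpu_device_program_register_sequence() performs a
 * read-modify-write per entry, roughly:
 *
 *	tmp = RREG32(reg);
 *	tmp &= ~and_mask;
 *	tmp |= or_mask;
 *	WREG32(reg, tmp);
 *
 * with an and_mask of 0xffffffff treated as a straight write of or_mask.
 */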
 120static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
 121{
 122	switch (adev->asic_type) {
 123	case CHIP_FIJI:
 124		amdgpu_device_program_register_sequence(adev,
 125							fiji_mgcg_cgcg_init,
 126							ARRAY_SIZE(fiji_mgcg_cgcg_init));
 127		amdgpu_device_program_register_sequence(adev,
 128							golden_settings_fiji_a10,
 129							ARRAY_SIZE(golden_settings_fiji_a10));
 130		break;
 131	case CHIP_TONGA:
 132		amdgpu_device_program_register_sequence(adev,
 133							tonga_mgcg_cgcg_init,
 134							ARRAY_SIZE(tonga_mgcg_cgcg_init));
 135		amdgpu_device_program_register_sequence(adev,
 136							golden_settings_tonga_a11,
 137							ARRAY_SIZE(golden_settings_tonga_a11));
 138		break;
 139	case CHIP_POLARIS11:
 140	case CHIP_POLARIS12:
 141		amdgpu_device_program_register_sequence(adev,
 142							golden_settings_polaris11_a11,
 143							ARRAY_SIZE(golden_settings_polaris11_a11));
 144		break;
 145	case CHIP_POLARIS10:
 146		amdgpu_device_program_register_sequence(adev,
 147							golden_settings_polaris10_a11,
 148							ARRAY_SIZE(golden_settings_polaris10_a11));
 149		break;
 150	case CHIP_CARRIZO:
 151		amdgpu_device_program_register_sequence(adev,
 152							cz_mgcg_cgcg_init,
 153							ARRAY_SIZE(cz_mgcg_cgcg_init));
 154		break;
 155	case CHIP_STONEY:
 156		amdgpu_device_program_register_sequence(adev,
 157							stoney_mgcg_cgcg_init,
 158							ARRAY_SIZE(stoney_mgcg_cgcg_init));
 159		amdgpu_device_program_register_sequence(adev,
 160							golden_settings_stoney_common,
 161							ARRAY_SIZE(golden_settings_stoney_common));
 162		break;
 163	default:
 164		break;
 165	}
 166}
 167
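/**
 * gmc_v8_0_mc_stop - stop the memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the MC to go idle, block CPU framebuffer access and put
 * the MC into blackout mode so it can be safely reprogrammed.
 */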
 168static void gmc_v8_0_mc_stop(struct amdgpu_device *adev)
 169{
 170	u32 blackout;
 171
 172	gmc_v8_0_wait_for_idle(adev);
 173
 174	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
 175	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
 176		/* Block CPU access */
 177		WREG32(mmBIF_FB_EN, 0);
 178		/* blackout the MC */
 179		blackout = REG_SET_FIELD(blackout,
 180					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
 181		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
 182	}
 183	/* wait for the MC to settle */
 184	udelay(100);
 185}
 186
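/**
 * gmc_v8_0_mc_resume - take the memory controller out of blackout
 *
 * @adev: amdgpu_device pointer
 *
 * Clear blackout mode and re-enable CPU framebuffer reads and writes.
 */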
 187static void gmc_v8_0_mc_resume(struct amdgpu_device *adev)
 188{
 189	u32 tmp;
 190
 191	/* unblackout the MC */
 192	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
 193	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
 194	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
 195	/* allow CPU access */
 196	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
 197	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
 198	WREG32(mmBIF_FB_EN, tmp);
 199}
 200
 201/**
 202 * gmc_v8_0_init_microcode - load ucode images from disk
 203 *
 204 * @adev: amdgpu_device pointer
 205 *
 206 * Use the firmware interface to load the ucode images into
 207 * the driver (not loaded into hw).
 208 * Returns 0 on success, error on failure.
 209 */
 210static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
 211{
 212	const char *chip_name;
 213	char fw_name[30];
 214	int err;
 215
 216	DRM_DEBUG("\n");
 217
 218	switch (adev->asic_type) {
 219	case CHIP_TONGA:
 220		chip_name = "tonga";
 221		break;
 222	case CHIP_POLARIS11:
 223		chip_name = "polaris11";
 224		break;
 225	case CHIP_POLARIS10:
 226		chip_name = "polaris10";
 227		break;
 228	case CHIP_POLARIS12:
 229		chip_name = "polaris12";
 230		break;
 231	case CHIP_FIJI:
 232	case CHIP_CARRIZO:
 233	case CHIP_STONEY:
 234		return 0;
  235	default:
		BUG();
 236	}
 237
 238	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
 239	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
 240	if (err)
 241		goto out;
 242	err = amdgpu_ucode_validate(adev->gmc.fw);
 243
 244out:
 245	if (err) {
 246		pr_err("mc: Failed to load firmware \"%s\"\n", fw_name);
 247		release_firmware(adev->gmc.fw);
 248		adev->gmc.fw = NULL;
 249	}
 250	return err;
 251}
 252
 253/**
 254 * gmc_v8_0_tonga_mc_load_microcode - load tonga MC ucode into the hw
 255 *
 256 * @adev: amdgpu_device pointer
 257 *
 258 * Load the GDDR MC ucode into the hw (CIK).
 259 * Returns 0 on success, error on failure.
 260 */
 261static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
 262{
 263	const struct mc_firmware_header_v1_0 *hdr;
 264	const __le32 *fw_data = NULL;
 265	const __le32 *io_mc_regs = NULL;
 266	u32 running;
 267	int i, ucode_size, regs_size;
 268
  269	/* Skip MC ucode loading on SR-IOV capable boards:
  270	 * the vbios does this for us in asic_init in that case.
  271	 * Likewise skip it on a VF, because the hypervisor will
  272	 * do it for this adapter.
  273	 */
 274	if (amdgpu_sriov_bios(adev))
 275		return 0;
 276
 277	if (!adev->gmc.fw)
 278		return -EINVAL;
 279
 280	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
 281	amdgpu_ucode_print_mc_hdr(&hdr->header);
 282
 283	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
 284	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
 285	io_mc_regs = (const __le32 *)
 286		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
 287	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 288	fw_data = (const __le32 *)
 289		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 290
 291	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
 292
 293	if (running == 0) {
 294		/* reset the engine and set to writable */
 295		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
 296		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
 297
 298		/* load mc io regs */
 299		for (i = 0; i < regs_size; i++) {
 300			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
 301			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
 302		}
 303		/* load the MC ucode */
 304		for (i = 0; i < ucode_size; i++)
 305			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));
 306
 307		/* put the engine back into the active state */
 308		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
 309		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
 310		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
 311
 312		/* wait for training to complete */
 313		for (i = 0; i < adev->usec_timeout; i++) {
 314			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
 315					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
 316				break;
 317			udelay(1);
 318		}
 319		for (i = 0; i < adev->usec_timeout; i++) {
 320			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
 321					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
 322				break;
 323			udelay(1);
 324		}
 325	}
 326
 327	return 0;
 328}
 329
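/**
 * gmc_v8_0_polaris_mc_load_microcode - load polaris MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI).
 * Returns 0 on success, error on failure.
 */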
 330static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
 331{
 332	const struct mc_firmware_header_v1_0 *hdr;
 333	const __le32 *fw_data = NULL;
 334	const __le32 *io_mc_regs = NULL;
 335	u32 data, vbios_version;
 336	int i, ucode_size, regs_size;
 337
  338	/* Skip MC ucode loading on SR-IOV capable boards:
  339	 * the vbios does this for us in asic_init in that case.
  340	 * Likewise skip it on a VF, because the hypervisor will
  341	 * do it for this adapter.
  342	 */
 343	if (amdgpu_sriov_bios(adev))
 344		return 0;
 345
 346	WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
 347	data = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
 348	vbios_version = data & 0xf;
 349
 350	if (vbios_version == 0)
 351		return 0;
 352
 353	if (!adev->gmc.fw)
 354		return -EINVAL;
 355
 356	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
 357	amdgpu_ucode_print_mc_hdr(&hdr->header);
 358
 359	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
 360	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
 361	io_mc_regs = (const __le32 *)
 362		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
 363	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 364	fw_data = (const __le32 *)
 365		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 366
 367	data = RREG32(mmMC_SEQ_MISC0);
 368	data &= ~(0x40);
 369	WREG32(mmMC_SEQ_MISC0, data);
 370
 371	/* load mc io regs */
 372	for (i = 0; i < regs_size; i++) {
 373		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
 374		WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
 375	}
 376
 377	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
 378	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
 379
 380	/* load the MC ucode */
 381	for (i = 0; i < ucode_size; i++)
 382		WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));
 383
 384	/* put the engine back into the active state */
 385	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
 386	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
 387	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
 388
 389	/* wait for training to complete */
 390	for (i = 0; i < adev->usec_timeout; i++) {
 391		data = RREG32(mmMC_SEQ_MISC0);
 392		if (data & 0x80)
 393			break;
 394		udelay(1);
 395	}
 396
 397	return 0;
 398}
 399
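/**
 * gmc_v8_0_vram_gtt_location - place vram and gtt in the GPU address space
 *
 * @adev: amdgpu_device pointer
 * @mc: memory controller structure holding the configuration
 *
 * On bare metal the FB base comes from MC_VM_FB_LOCATION (16MB
 * granularity, hence the shift by 24); on an SR-IOV VF it starts at 0.
 */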
 400static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
 401				       struct amdgpu_gmc *mc)
 402{
 403	u64 base = 0;
 404
 405	if (!amdgpu_sriov_vf(adev))
 406		base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
 407	base <<= 24;
 408
 409	amdgpu_device_vram_location(adev, &adev->gmc, base);
 410	amdgpu_device_gart_location(adev, mc);
 411}
 412
 413/**
 414 * gmc_v8_0_mc_program - program the GPU memory controller
 415 *
 416 * @adev: amdgpu_device pointer
 417 *
 418 * Set the location of vram, gart, and AGP in the GPU's
 419 * physical address space (CIK).
 420 */
 421static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
 422{
 423	u32 tmp;
 424	int i, j;
 425
 426	/* Initialize HDP */
 427	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
 428		WREG32((0xb05 + j), 0x00000000);
 429		WREG32((0xb06 + j), 0x00000000);
 430		WREG32((0xb07 + j), 0x00000000);
 431		WREG32((0xb08 + j), 0x00000000);
 432		WREG32((0xb09 + j), 0x00000000);
 433	}
 434	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
 435
 436	if (gmc_v8_0_wait_for_idle((void *)adev)) {
  437		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
 438	}
 439	if (adev->mode_info.num_crtc) {
 440		/* Lockout access through VGA aperture*/
 441		tmp = RREG32(mmVGA_HDP_CONTROL);
 442		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
 443		WREG32(mmVGA_HDP_CONTROL, tmp);
 444
 445		/* disable VGA render */
 446		tmp = RREG32(mmVGA_RENDER_CONTROL);
 447		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
 448		WREG32(mmVGA_RENDER_CONTROL, tmp);
 449	}
 450	/* Update configuration */
 451	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
 452	       adev->gmc.vram_start >> 12);
 453	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
 454	       adev->gmc.vram_end >> 12);
 455	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
 456	       adev->vram_scratch.gpu_addr >> 12);
 457
 458	if (amdgpu_sriov_vf(adev)) {
 459		tmp = ((adev->gmc.vram_end >> 24) & 0xFFFF) << 16;
 460		tmp |= ((adev->gmc.vram_start >> 24) & 0xFFFF);
 461		WREG32(mmMC_VM_FB_LOCATION, tmp);
 462		/* XXX double check these! */
 463		WREG32(mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
 464		WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
 465		WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
 466	}
 467
 468	WREG32(mmMC_VM_AGP_BASE, 0);
 469	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
 470	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
 471	if (gmc_v8_0_wait_for_idle((void *)adev)) {
  472		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
 473	}
 474
 475	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
 476
 477	tmp = RREG32(mmHDP_MISC_CNTL);
 478	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
 479	WREG32(mmHDP_MISC_CNTL, tmp);
 480
 481	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
 482	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
 483}
 484
 485/**
 486 * gmc_v8_0_mc_init - initialize the memory controller driver params
 487 *
 488 * @adev: amdgpu_device pointer
 489 *
 490 * Look up the amount of vram, vram width, and decide how to place
 491 * vram and gart within the GPU's physical address space (CIK).
 492 * Returns 0 for success.
 493 */
 494static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
 495{
 496	int r;
 497
 498	adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
 499	if (!adev->gmc.vram_width) {
 500		u32 tmp;
 501		int chansize, numchan;
 502
  503		/* Get VRAM information */
 504		tmp = RREG32(mmMC_ARB_RAMCFG);
 505		if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
 506			chansize = 64;
 507		} else {
 508			chansize = 32;
 509		}
 510		tmp = RREG32(mmMC_SHARED_CHMAP);
 511		switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
 512		case 0:
 513		default:
 514			numchan = 1;
 515			break;
 516		case 1:
 517			numchan = 2;
 518			break;
 519		case 2:
 520			numchan = 4;
 521			break;
 522		case 3:
 523			numchan = 8;
 524			break;
 525		case 4:
 526			numchan = 3;
 527			break;
 528		case 5:
 529			numchan = 6;
 530			break;
 531		case 6:
 532			numchan = 10;
 533			break;
 534		case 7:
 535			numchan = 12;
 536			break;
 537		case 8:
 538			numchan = 16;
 539			break;
 540		}
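		/*
		 * Bus width is channels * bits per channel; a typical
		 * 256-bit GDDR5 board, for example, reads back as
		 * 8 channels of 32 bits each.
		 */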
 541		adev->gmc.vram_width = numchan * chansize;
 542	}
  543	/* size in MB on vi */
 544	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 545	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 546
 547	if (!(adev->flags & AMD_IS_APU)) {
 548		r = amdgpu_device_resize_fb_bar(adev);
 549		if (r)
 550			return r;
 551	}
 552	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
 553	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 554
 555#ifdef CONFIG_X86_64
 556	if (adev->flags & AMD_IS_APU) {
 557		adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
 558		adev->gmc.aper_size = adev->gmc.real_vram_size;
 559	}
 560#endif
 561
 562	/* In case the PCI BAR is larger than the actual amount of vram */
 563	adev->gmc.visible_vram_size = adev->gmc.aper_size;
 564	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
 565		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
 566
 567	/* set the gart size */
 568	if (amdgpu_gart_size == -1) {
 569		switch (adev->asic_type) {
 570		case CHIP_POLARIS11: /* all engines support GPUVM */
 571		case CHIP_POLARIS10: /* all engines support GPUVM */
 572		case CHIP_POLARIS12: /* all engines support GPUVM */
 573		default:
 574			adev->gmc.gart_size = 256ULL << 20;
 575			break;
 576		case CHIP_TONGA:   /* UVD, VCE do not support GPUVM */
 577		case CHIP_FIJI:    /* UVD, VCE do not support GPUVM */
 578		case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */
 579		case CHIP_STONEY:  /* UVD does not support GPUVM, DCE SG support */
 580			adev->gmc.gart_size = 1024ULL << 20;
 581			break;
 582		}
 583	} else {
 584		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
 585	}
 586
 587	gmc_v8_0_vram_gtt_location(adev, &adev->gmc);
 588
 589	return 0;
 590}
 591
 592/*
 593 * GART
 594 * VMID 0 is the physical GPU addresses as used by the kernel.
 595 * VMIDs 1-15 are used for userspace clients and are handled
 596 * by the amdgpu vm/hsa code.
 597 */
 598
 599/**
 600 * gmc_v8_0_flush_gpu_tlb - gart tlb flush callback
 601 *
 602 * @adev: amdgpu_device pointer
 603 * @vmid: vm instance to flush
 604 *
 605 * Flush the TLB for the requested page table (CIK).
 606 */
 607static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev,
 608					uint32_t vmid)
 609{
  610	/* bits 0-15 are the VM contexts 0-15 */
 611	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 612}
 613
 614static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 615					    unsigned vmid, uint64_t pd_addr)
 616{
 617	uint32_t reg;
 618
 619	if (vmid < 8)
 620		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
 621	else
 622		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
 623	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
 624
  625	/* bits 0-15 are the VM contexts 0-15 */
 626	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
 627
 628	return pd_addr;
 629}
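/*
 * gmc_v8_0_emit_flush_gpu_tlb() above performs the same invalidation as
 * the MMIO flush, but queues the register writes on a ring so the flush
 * is ordered with the submitted command stream.
 */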
 630
 631static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
 632					unsigned pasid)
 633{
 634	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
 635}
 636
 637/**
 638 * gmc_v8_0_set_pte_pde - update the page tables using MMIO
 639 *
 640 * @adev: amdgpu_device pointer
 641 * @cpu_pt_addr: cpu address of the page table
 642 * @gpu_page_idx: entry in the page table to update
 643 * @addr: dst addr to write into pte/pde
 644 * @flags: access flags
 645 *
 646 * Update the page tables using the CPU.
 647 */
 648static int gmc_v8_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
 649				uint32_t gpu_page_idx, uint64_t addr,
 650				uint64_t flags)
 651{
 652	void __iomem *ptr = (void *)cpu_pt_addr;
 653	uint64_t value;
 654
 655	/*
 656	 * PTE format on VI:
 657	 * 63:40 reserved
 658	 * 39:12 4k physical page base address
 659	 * 11:7 fragment
 660	 * 6 write
 661	 * 5 read
 662	 * 4 exe
 663	 * 3 reserved
 664	 * 2 snooped
 665	 * 1 system
 666	 * 0 valid
 667	 *
 668	 * PDE format on VI:
 669	 * 63:59 block fragment size
 670	 * 58:40 reserved
 671	 * 39:1 physical base address of PTE
 672	 * bits 5:1 must be 0.
 673	 * 0 valid
 674	 */
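	/*
	 * Worked example using the layout above: addr 0x12345000 with
	 * the valid (bit 0), read (bit 5) and write (bit 6) flags set
	 * (0x61) yields a PTE of 0x12345061.
	 */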
 675	value = addr & 0x000000FFFFFFF000ULL;
 676	value |= flags;
 677	writeq(value, ptr + (gpu_page_idx * 8));
 678
 679	return 0;
 680}
 681
 682static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev,
 683					  uint32_t flags)
 684{
 685	uint64_t pte_flag = 0;
 686
 687	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
 688		pte_flag |= AMDGPU_PTE_EXECUTABLE;
 689	if (flags & AMDGPU_VM_PAGE_READABLE)
 690		pte_flag |= AMDGPU_PTE_READABLE;
 691	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
 692		pte_flag |= AMDGPU_PTE_WRITEABLE;
 693	if (flags & AMDGPU_VM_PAGE_PRT)
 694		pte_flag |= AMDGPU_PTE_PRT;
 695
 696	return pte_flag;
 697}
 698
 699static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
 700				uint64_t *addr, uint64_t *flags)
 701{
 702	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
 703}
 704
 705/**
 706 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 707 *
 708 * @adev: amdgpu_device pointer
 709 * @value: true redirects VM faults to the default page
 710 */
 711static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
 712					      bool value)
 713{
 714	u32 tmp;
 715
 716	tmp = RREG32(mmVM_CONTEXT1_CNTL);
 717	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 718			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 719	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 720			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 721	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 722			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 723	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 724			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 725	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 726			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 727	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 728			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 729	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 730			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 731	WREG32(mmVM_CONTEXT1_CNTL, tmp);
 732}
 733
 734/**
 735 * gmc_v8_0_set_prt - set PRT VM fault
 736 *
 737 * @adev: amdgpu_device pointer
 738 * @enable: enable/disable VM fault handling for PRT
 739*/
 740static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
 741{
 742	u32 tmp;
 743
 744	if (enable && !adev->gmc.prt_warning) {
 745		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
 746		adev->gmc.prt_warning = true;
 747	}
 748
 749	tmp = RREG32(mmVM_PRT_CNTL);
 750	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 751			    CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
 752	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 753			    CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
 754	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 755			    TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
 756	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 757			    TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
 758	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 759			    L2_CACHE_STORE_INVALID_ENTRIES, enable);
 760	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 761			    L1_TLB_STORE_INVALID_ENTRIES, enable);
 762	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 763			    MASK_PDE0_FAULT, enable);
 764	WREG32(mmVM_PRT_CNTL, tmp);
 765
 766	if (enable) {
 767		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
 768		uint32_t high = adev->vm_manager.max_pfn -
 769			(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
 770
 771		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
 772		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
 773		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
 774		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
 775		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
 776		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
 777		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
 778		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
 779	} else {
 780		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
 781		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
 782		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
 783		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
 784		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
 785		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
 786		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
 787		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
 788	}
 789}
 790
 791/**
 792 * gmc_v8_0_gart_enable - gart enable
 793 *
 794 * @adev: amdgpu_device pointer
 795 *
 796 * This sets up the TLBs, programs the page tables for VMID0,
 797 * sets up the hw for VMIDs 1-15 which are allocated on
 798 * demand, and sets up the global locations for the LDS, GDS,
 799 * and GPUVM for FSA64 clients (CIK).
 800 * Returns 0 for success, errors for failure.
 801 */
 802static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 803{
 804	int r, i;
 805	u32 tmp, field;
 806
 807	if (adev->gart.robj == NULL) {
 808		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
 809		return -EINVAL;
 810	}
 811	r = amdgpu_gart_table_vram_pin(adev);
 812	if (r)
 813		return r;
 814	/* Setup TLB control */
 815	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
 816	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
 817	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
 818	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
 819	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
 820	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
 821	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
 822	/* Setup L2 cache */
 823	tmp = RREG32(mmVM_L2_CNTL);
 824	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
 825	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
 826	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
 827	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
 828	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
 829	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
 830	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
 831	WREG32(mmVM_L2_CNTL, tmp);
 832	tmp = RREG32(mmVM_L2_CNTL2);
 833	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
 834	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
 835	WREG32(mmVM_L2_CNTL2, tmp);
 836
 837	field = adev->vm_manager.fragment_size;
 838	tmp = RREG32(mmVM_L2_CNTL3);
 839	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
 840	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
 841	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
 842	WREG32(mmVM_L2_CNTL3, tmp);
 843	/* XXX: set to enable PTE/PDE in system memory */
 844	tmp = RREG32(mmVM_L2_CNTL4);
 845	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
 846	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
 847	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
 848	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
 849	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
 850	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
 851	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
 852	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
 853	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
 854	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
 855	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
 856	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
 857	WREG32(mmVM_L2_CNTL4, tmp);
 858	/* setup context0 */
 859	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
 860	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
 861	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
 862	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 863			(u32)(adev->dummy_page_addr >> 12));
 864	WREG32(mmVM_CONTEXT0_CNTL2, 0);
 865	tmp = RREG32(mmVM_CONTEXT0_CNTL);
 866	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
 867	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
 868	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 869	WREG32(mmVM_CONTEXT0_CNTL, tmp);
 870
 871	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
 872	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
 873	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);
 874
 875	/* empty context1-15 */
  876	/* FIXME: start with 4GB; once 2-level page tables are in use,
  877	 * switch to the full vm size space
  878	 */
 879	/* set vm size, must be a multiple of 4 */
 880	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
 881	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
 882	for (i = 1; i < 16; i++) {
 883		if (i < 8)
 884			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
 885			       adev->gart.table_addr >> 12);
 886		else
 887			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
 888			       adev->gart.table_addr >> 12);
 889	}
 890
 891	/* enable context1-15 */
 892	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
 893	       (u32)(adev->dummy_page_addr >> 12));
 894	WREG32(mmVM_CONTEXT1_CNTL2, 4);
 895	tmp = RREG32(mmVM_CONTEXT1_CNTL);
 896	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
 897	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
 898	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 899	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 900	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 901	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 902	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 903	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 904	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
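	/*
	 * block_size is log2(pages per block); the field is presumably
	 * biased by 9 because the smallest supported block is 2^9
	 * pages, so the default block_size of 9 programs a 0 here.
	 */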
 905	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
 906			    adev->vm_manager.block_size - 9);
 907	WREG32(mmVM_CONTEXT1_CNTL, tmp);
 908	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
 909		gmc_v8_0_set_fault_enable_default(adev, false);
 910	else
 911		gmc_v8_0_set_fault_enable_default(adev, true);
 912
 913	gmc_v8_0_flush_gpu_tlb(adev, 0);
 914	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
 915		 (unsigned)(adev->gmc.gart_size >> 20),
 916		 (unsigned long long)adev->gart.table_addr);
 917	adev->gart.ready = true;
 918	return 0;
 919}
 920
 921static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
 922{
 923	int r;
 924
 925	if (adev->gart.robj) {
  926		WARN(1, "PCIE GART already initialized\n");
 927		return 0;
 928	}
 929	/* Initialize common gart structure */
 930	r = amdgpu_gart_init(adev);
 931	if (r)
 932		return r;
 933	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
 934	adev->gart.gart_pte_flags = AMDGPU_PTE_EXECUTABLE;
 935	return amdgpu_gart_table_vram_alloc(adev);
 936}
 937
 938/**
 939 * gmc_v8_0_gart_disable - gart disable
 940 *
 941 * @adev: amdgpu_device pointer
 942 *
  943 * This disables all VM page tables (CIK).
 944 */
 945static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
 946{
 947	u32 tmp;
 948
 949	/* Disable all tables */
 950	WREG32(mmVM_CONTEXT0_CNTL, 0);
 951	WREG32(mmVM_CONTEXT1_CNTL, 0);
 952	/* Setup TLB control */
 953	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
 954	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
 955	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
 956	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
 957	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
 958	/* Setup L2 cache */
 959	tmp = RREG32(mmVM_L2_CNTL);
 960	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
 961	WREG32(mmVM_L2_CNTL, tmp);
 962	WREG32(mmVM_L2_CNTL2, 0);
 963	amdgpu_gart_table_vram_unpin(adev);
 964}
 965
 966/**
 967 * gmc_v8_0_gart_fini - vm fini callback
 968 *
 969 * @adev: amdgpu_device pointer
 970 *
 971 * Tears down the driver GART/VM setup (CIK).
 972 */
 973static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
 974{
 975	amdgpu_gart_table_vram_free(adev);
 976	amdgpu_gart_fini(adev);
 977}
 978
 979/**
 980 * gmc_v8_0_vm_decode_fault - print human readable fault info
 981 *
 982 * @adev: amdgpu_device pointer
 983 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 984 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
      * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
      * @pasid: debug logging pasid
 985 *
 986 * Print human readable fault information (CIK).
 987 */
 988static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
 989				     u32 addr, u32 mc_client, unsigned pasid)
 990{
 991	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
 992	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 993					PROTECTIONS);
 994	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
 995		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
 996	u32 mc_id;
 997
 998	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 999			      MEMORY_CLIENT_ID);
1000
1001	dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
1002	       protections, vmid, pasid, addr,
1003	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
1004			     MEMORY_CLIENT_RW) ?
1005	       "write" : "read", block, mc_client, mc_id);
1006}
1007
1008static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
1009{
1010	switch (mc_seq_vram_type) {
1011	case MC_SEQ_MISC0__MT__GDDR1:
1012		return AMDGPU_VRAM_TYPE_GDDR1;
1013	case MC_SEQ_MISC0__MT__DDR2:
1014		return AMDGPU_VRAM_TYPE_DDR2;
1015	case MC_SEQ_MISC0__MT__GDDR3:
1016		return AMDGPU_VRAM_TYPE_GDDR3;
1017	case MC_SEQ_MISC0__MT__GDDR4:
1018		return AMDGPU_VRAM_TYPE_GDDR4;
1019	case MC_SEQ_MISC0__MT__GDDR5:
1020		return AMDGPU_VRAM_TYPE_GDDR5;
1021	case MC_SEQ_MISC0__MT__HBM:
1022		return AMDGPU_VRAM_TYPE_HBM;
1023	case MC_SEQ_MISC0__MT__DDR3:
1024		return AMDGPU_VRAM_TYPE_DDR3;
1025	default:
1026		return AMDGPU_VRAM_TYPE_UNKNOWN;
1027	}
1028}
1029
1030static int gmc_v8_0_early_init(void *handle)
1031{
1032	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1033
1034	gmc_v8_0_set_gmc_funcs(adev);
1035	gmc_v8_0_set_irq_funcs(adev);
1036
1037	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
1038	adev->gmc.shared_aperture_end =
1039		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
1040	adev->gmc.private_aperture_start =
1041		adev->gmc.shared_aperture_end + 1;
1042	adev->gmc.private_aperture_end =
1043		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
1044
1045	return 0;
1046}
1047
1048static int gmc_v8_0_late_init(void *handle)
1049{
1050	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1051
1052	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
1053		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
1054	else
1055		return 0;
1056}
1057
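/*
 * Fiji reports the memory type at a different MC_SEQ_MISC0 offset than
 * the other VI parts, hence the dedicated define below.
 */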
1058#define mmMC_SEQ_MISC0_FIJI 0xA71
1059
1060static int gmc_v8_0_sw_init(void *handle)
1061{
1062	int r;
1063	int dma_bits;
1064	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1065
1066	if (adev->flags & AMD_IS_APU) {
1067		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
1068	} else {
1069		u32 tmp;
1070
1071		if (adev->asic_type == CHIP_FIJI)
1072			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
1073		else
1074			tmp = RREG32(mmMC_SEQ_MISC0);
1075		tmp &= MC_SEQ_MISC0__MT__MASK;
1076		adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
1077	}
1078
1079	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
1080	if (r)
1081		return r;
1082
1083	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
1084	if (r)
1085		return r;
1086
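	/*
	 * Sources 146 and 147 are the GFX page-invalid and memory-
	 * protection faults (named VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT
	 * and VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT in later kernels);
	 * both are routed to the same vm_fault handler.
	 */
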
 1087	/* Adjust VM size here.
 1088	 * Currently set to 4GB ((1 << 20) 4k pages).
 1089	 * Max GPUVM size for these VI parts is 40 bits.
 1090	 */
1091	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);
1092
1093	/* Set the internal MC address mask
1094	 * This is the max address of the GPU's
1095	 * internal address space.
1096	 */
1097	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
1098
1099	adev->gmc.stolen_size = 256 * 1024;
1100
 1101	/* set DMA mask + need_dma32 flags.
 1102	 * PCIE - can handle 40-bits.
 1103	 * IGP - can handle 40-bits.
 1104	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics.
 1105	 */
1106	adev->need_dma32 = false;
1107	dma_bits = adev->need_dma32 ? 32 : 40;
1108	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
1109	if (r) {
1110		adev->need_dma32 = true;
1111		dma_bits = 32;
1112		pr_warn("amdgpu: No suitable DMA available\n");
1113	}
1114	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
1115	if (r) {
1116		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
1117		pr_warn("amdgpu: No coherent DMA available\n");
1118	}
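	/*
	 * Fall back to swiotlb bounce buffering when some I/O memory
	 * lies above what the chosen DMA mask can address directly.
	 */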
1119	adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
1120
1121	r = gmc_v8_0_init_microcode(adev);
1122	if (r) {
1123		DRM_ERROR("Failed to load mc firmware!\n");
1124		return r;
1125	}
1126
1127	r = gmc_v8_0_mc_init(adev);
1128	if (r)
1129		return r;
1130
1131	/* Memory manager */
1132	r = amdgpu_bo_init(adev);
1133	if (r)
1134		return r;
1135
1136	r = gmc_v8_0_gart_init(adev);
1137	if (r)
1138		return r;
1139
1140	/*
1141	 * number of VMs
1142	 * VMID 0 is reserved for System
1143	 * amdgpu graphics/compute will use VMIDs 1-7
1144	 * amdkfd will use VMIDs 8-15
1145	 */
1146	adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
1147	amdgpu_vm_manager_init(adev);
1148
1149	/* base offset of vram pages */
1150	if (adev->flags & AMD_IS_APU) {
1151		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
1152
1153		tmp <<= 22;
1154		adev->vm_manager.vram_base_offset = tmp;
1155	} else {
1156		adev->vm_manager.vram_base_offset = 0;
1157	}
1158
1159	return 0;
1160}
1161
1162static int gmc_v8_0_sw_fini(void *handle)
1163{
1164	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1165
1166	amdgpu_gem_force_release(adev);
1167	amdgpu_vm_manager_fini(adev);
1168	gmc_v8_0_gart_fini(adev);
1169	amdgpu_bo_fini(adev);
1170	release_firmware(adev->gmc.fw);
1171	adev->gmc.fw = NULL;
1172
1173	return 0;
1174}
1175
1176static int gmc_v8_0_hw_init(void *handle)
1177{
1178	int r;
1179	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1180
1181	gmc_v8_0_init_golden_registers(adev);
1182
1183	gmc_v8_0_mc_program(adev);
1184
1185	if (adev->asic_type == CHIP_TONGA) {
1186		r = gmc_v8_0_tonga_mc_load_microcode(adev);
1187		if (r) {
1188			DRM_ERROR("Failed to load MC firmware!\n");
1189			return r;
1190		}
1191	} else if (adev->asic_type == CHIP_POLARIS11 ||
1192			adev->asic_type == CHIP_POLARIS10 ||
1193			adev->asic_type == CHIP_POLARIS12) {
1194		r = gmc_v8_0_polaris_mc_load_microcode(adev);
1195		if (r) {
1196			DRM_ERROR("Failed to load MC firmware!\n");
1197			return r;
1198		}
1199	}
1200
1201	r = gmc_v8_0_gart_enable(adev);
1202	if (r)
1203		return r;
1204
1205	return r;
1206}
1207
1208static int gmc_v8_0_hw_fini(void *handle)
1209{
1210	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1211
1212	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
1213	gmc_v8_0_gart_disable(adev);
1214
1215	return 0;
1216}
1217
1218static int gmc_v8_0_suspend(void *handle)
1219{
1220	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1221
1222	gmc_v8_0_hw_fini(adev);
1223
1224	return 0;
1225}
1226
1227static int gmc_v8_0_resume(void *handle)
1228{
1229	int r;
1230	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1231
1232	r = gmc_v8_0_hw_init(adev);
1233	if (r)
1234		return r;
1235
1236	amdgpu_vmid_reset_all(adev);
1237
1238	return 0;
1239}
1240
1241static bool gmc_v8_0_is_idle(void *handle)
1242{
1243	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1244	u32 tmp = RREG32(mmSRBM_STATUS);
1245
1246	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1247		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
1248		return false;
1249
1250	return true;
1251}
1252
1253static int gmc_v8_0_wait_for_idle(void *handle)
1254{
1255	unsigned i;
1256	u32 tmp;
1257	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1258
1259	for (i = 0; i < adev->usec_timeout; i++) {
1260		/* read MC_STATUS */
1261		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
1262					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1263					       SRBM_STATUS__MCC_BUSY_MASK |
1264					       SRBM_STATUS__MCD_BUSY_MASK |
1265					       SRBM_STATUS__VMC_BUSY_MASK |
1266					       SRBM_STATUS__VMC1_BUSY_MASK);
1267		if (!tmp)
1268			return 0;
1269		udelay(1);
1270	}
1271	return -ETIMEDOUT;
1272
1273}
1274
1275static bool gmc_v8_0_check_soft_reset(void *handle)
1276{
1277	u32 srbm_soft_reset = 0;
1278	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1279	u32 tmp = RREG32(mmSRBM_STATUS);
1280
1281	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
1282		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1283						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
1284
1285	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1286		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
1287		if (!(adev->flags & AMD_IS_APU))
1288			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1289							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
1290	}
1291	if (srbm_soft_reset) {
1292		adev->gmc.srbm_soft_reset = srbm_soft_reset;
1293		return true;
1294	} else {
1295		adev->gmc.srbm_soft_reset = 0;
1296		return false;
1297	}
1298}
1299
1300static int gmc_v8_0_pre_soft_reset(void *handle)
1301{
1302	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1303
1304	if (!adev->gmc.srbm_soft_reset)
1305		return 0;
1306
1307	gmc_v8_0_mc_stop(adev);
1308	if (gmc_v8_0_wait_for_idle(adev)) {
 1309		dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
1310	}
1311
1312	return 0;
1313}
1314
1315static int gmc_v8_0_soft_reset(void *handle)
1316{
1317	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1318	u32 srbm_soft_reset;
1319
1320	if (!adev->gmc.srbm_soft_reset)
1321		return 0;
1322	srbm_soft_reset = adev->gmc.srbm_soft_reset;
1323
1324	if (srbm_soft_reset) {
1325		u32 tmp;
1326
1327		tmp = RREG32(mmSRBM_SOFT_RESET);
1328		tmp |= srbm_soft_reset;
1329		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1330		WREG32(mmSRBM_SOFT_RESET, tmp);
1331		tmp = RREG32(mmSRBM_SOFT_RESET);
1332
1333		udelay(50);
1334
1335		tmp &= ~srbm_soft_reset;
1336		WREG32(mmSRBM_SOFT_RESET, tmp);
1337		tmp = RREG32(mmSRBM_SOFT_RESET);
1338
1339		/* Wait a little for things to settle down */
1340		udelay(50);
1341	}
1342
1343	return 0;
1344}
1345
1346static int gmc_v8_0_post_soft_reset(void *handle)
1347{
1348	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1349
1350	if (!adev->gmc.srbm_soft_reset)
1351		return 0;
1352
1353	gmc_v8_0_mc_resume(adev);
1354	return 0;
1355}
1356
1357static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
1358					     struct amdgpu_irq_src *src,
1359					     unsigned type,
1360					     enum amdgpu_interrupt_state state)
1361{
1362	u32 tmp;
1363	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1364		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1365		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1366		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1367		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1368		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1369		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
1370
1371	switch (state) {
1372	case AMDGPU_IRQ_STATE_DISABLE:
1373		/* system context */
1374		tmp = RREG32(mmVM_CONTEXT0_CNTL);
1375		tmp &= ~bits;
1376		WREG32(mmVM_CONTEXT0_CNTL, tmp);
1377		/* VMs */
1378		tmp = RREG32(mmVM_CONTEXT1_CNTL);
1379		tmp &= ~bits;
1380		WREG32(mmVM_CONTEXT1_CNTL, tmp);
1381		break;
1382	case AMDGPU_IRQ_STATE_ENABLE:
1383		/* system context */
1384		tmp = RREG32(mmVM_CONTEXT0_CNTL);
1385		tmp |= bits;
1386		WREG32(mmVM_CONTEXT0_CNTL, tmp);
1387		/* VMs */
1388		tmp = RREG32(mmVM_CONTEXT1_CNTL);
1389		tmp |= bits;
1390		WREG32(mmVM_CONTEXT1_CNTL, tmp);
1391		break;
1392	default:
1393		break;
1394	}
1395
1396	return 0;
1397}
1398
1399static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
1400				      struct amdgpu_irq_src *source,
1401				      struct amdgpu_iv_entry *entry)
1402{
1403	u32 addr, status, mc_client;
1404
1405	if (amdgpu_sriov_vf(adev)) {
1406		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
1407			entry->src_id, entry->src_data[0]);
1408		dev_err(adev->dev, " Can't decode VM fault info here on SRIOV VF\n");
1409		return 0;
1410	}
1411
1412	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
1413	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
1414	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
1415	/* reset addr and status */
1416	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
1417
1418	if (!addr && !status)
1419		return 0;
1420
1421	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
1422		gmc_v8_0_set_fault_enable_default(adev, false);
1423
1424	if (printk_ratelimit()) {
1425		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
1426			entry->src_id, entry->src_data[0]);
1427		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
1428			addr);
1429		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1430			status);
1431		gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client,
1432					 entry->pasid);
1433	}
1434
1435	return 0;
1436}
1437
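/*
 * Both fiji_update_* helpers below use the same read-modify-write
 * pattern: OR in each block's enable mask when gating is requested and
 * supported in adev->cg_flags, otherwise clear it.
 */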
1438static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
1439						     bool enable)
1440{
1441	uint32_t data;
1442
1443	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
1444		data = RREG32(mmMC_HUB_MISC_HUB_CG);
1445		data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
1446		WREG32(mmMC_HUB_MISC_HUB_CG, data);
1447
1448		data = RREG32(mmMC_HUB_MISC_SIP_CG);
1449		data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
1450		WREG32(mmMC_HUB_MISC_SIP_CG, data);
1451
1452		data = RREG32(mmMC_HUB_MISC_VM_CG);
1453		data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
1454		WREG32(mmMC_HUB_MISC_VM_CG, data);
1455
1456		data = RREG32(mmMC_XPB_CLK_GAT);
1457		data |= MC_XPB_CLK_GAT__ENABLE_MASK;
1458		WREG32(mmMC_XPB_CLK_GAT, data);
1459
1460		data = RREG32(mmATC_MISC_CG);
1461		data |= ATC_MISC_CG__ENABLE_MASK;
1462		WREG32(mmATC_MISC_CG, data);
1463
1464		data = RREG32(mmMC_CITF_MISC_WR_CG);
1465		data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
1466		WREG32(mmMC_CITF_MISC_WR_CG, data);
1467
1468		data = RREG32(mmMC_CITF_MISC_RD_CG);
1469		data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
1470		WREG32(mmMC_CITF_MISC_RD_CG, data);
1471
1472		data = RREG32(mmMC_CITF_MISC_VM_CG);
1473		data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
1474		WREG32(mmMC_CITF_MISC_VM_CG, data);
1475
1476		data = RREG32(mmVM_L2_CG);
1477		data |= VM_L2_CG__ENABLE_MASK;
1478		WREG32(mmVM_L2_CG, data);
1479	} else {
1480		data = RREG32(mmMC_HUB_MISC_HUB_CG);
1481		data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
1482		WREG32(mmMC_HUB_MISC_HUB_CG, data);
1483
1484		data = RREG32(mmMC_HUB_MISC_SIP_CG);
1485		data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
1486		WREG32(mmMC_HUB_MISC_SIP_CG, data);
1487
1488		data = RREG32(mmMC_HUB_MISC_VM_CG);
1489		data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
1490		WREG32(mmMC_HUB_MISC_VM_CG, data);
1491
1492		data = RREG32(mmMC_XPB_CLK_GAT);
1493		data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
1494		WREG32(mmMC_XPB_CLK_GAT, data);
1495
1496		data = RREG32(mmATC_MISC_CG);
1497		data &= ~ATC_MISC_CG__ENABLE_MASK;
1498		WREG32(mmATC_MISC_CG, data);
1499
1500		data = RREG32(mmMC_CITF_MISC_WR_CG);
1501		data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
1502		WREG32(mmMC_CITF_MISC_WR_CG, data);
1503
1504		data = RREG32(mmMC_CITF_MISC_RD_CG);
1505		data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
1506		WREG32(mmMC_CITF_MISC_RD_CG, data);
1507
1508		data = RREG32(mmMC_CITF_MISC_VM_CG);
1509		data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
1510		WREG32(mmMC_CITF_MISC_VM_CG, data);
1511
1512		data = RREG32(mmVM_L2_CG);
1513		data &= ~VM_L2_CG__ENABLE_MASK;
1514		WREG32(mmVM_L2_CG, data);
1515	}
1516}
1517
1518static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
1519				       bool enable)
1520{
1521	uint32_t data;
1522
1523	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
1524		data = RREG32(mmMC_HUB_MISC_HUB_CG);
1525		data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
1526		WREG32(mmMC_HUB_MISC_HUB_CG, data);
1527
1528		data = RREG32(mmMC_HUB_MISC_SIP_CG);
1529		data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
1530		WREG32(mmMC_HUB_MISC_SIP_CG, data);
1531
1532		data = RREG32(mmMC_HUB_MISC_VM_CG);
1533		data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1534		WREG32(mmMC_HUB_MISC_VM_CG, data);
1535
1536		data = RREG32(mmMC_XPB_CLK_GAT);
1537		data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
1538		WREG32(mmMC_XPB_CLK_GAT, data);
1539
1540		data = RREG32(mmATC_MISC_CG);
1541		data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
1542		WREG32(mmATC_MISC_CG, data);
1543
1544		data = RREG32(mmMC_CITF_MISC_WR_CG);
1545		data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
1546		WREG32(mmMC_CITF_MISC_WR_CG, data);
1547
1548		data = RREG32(mmMC_CITF_MISC_RD_CG);
1549		data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
1550		WREG32(mmMC_CITF_MISC_RD_CG, data);
1551
1552		data = RREG32(mmMC_CITF_MISC_VM_CG);
1553		data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1554		WREG32(mmMC_CITF_MISC_VM_CG, data);
1555
1556		data = RREG32(mmVM_L2_CG);
1557		data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
1558		WREG32(mmVM_L2_CG, data);
1559	} else {
1560		data = RREG32(mmMC_HUB_MISC_HUB_CG);
1561		data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
1562		WREG32(mmMC_HUB_MISC_HUB_CG, data);
1563
1564		data = RREG32(mmMC_HUB_MISC_SIP_CG);
1565		data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
1566		WREG32(mmMC_HUB_MISC_SIP_CG, data);
1567
1568		data = RREG32(mmMC_HUB_MISC_VM_CG);
1569		data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1570		WREG32(mmMC_HUB_MISC_VM_CG, data);
1571
1572		data = RREG32(mmMC_XPB_CLK_GAT);
1573		data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
1574		WREG32(mmMC_XPB_CLK_GAT, data);
1575
1576		data = RREG32(mmATC_MISC_CG);
1577		data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
1578		WREG32(mmATC_MISC_CG, data);
1579
1580		data = RREG32(mmMC_CITF_MISC_WR_CG);
1581		data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
1582		WREG32(mmMC_CITF_MISC_WR_CG, data);
1583
1584		data = RREG32(mmMC_CITF_MISC_RD_CG);
1585		data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
1586		WREG32(mmMC_CITF_MISC_RD_CG, data);
1587
1588		data = RREG32(mmMC_CITF_MISC_VM_CG);
1589		data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1590		WREG32(mmMC_CITF_MISC_VM_CG, data);
1591
1592		data = RREG32(mmVM_L2_CG);
1593		data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
1594		WREG32(mmVM_L2_CG, data);
1595	}
1596}
1597
1598static int gmc_v8_0_set_clockgating_state(void *handle,
1599					  enum amd_clockgating_state state)
1600{
1601	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1602
1603	if (amdgpu_sriov_vf(adev))
1604		return 0;
1605
1606	switch (adev->asic_type) {
1607	case CHIP_FIJI:
1608		fiji_update_mc_medium_grain_clock_gating(adev,
1609				state == AMD_CG_STATE_GATE);
1610		fiji_update_mc_light_sleep(adev,
1611				state == AMD_CG_STATE_GATE);
1612		break;
1613	default:
1614		break;
1615	}
1616	return 0;
1617}
1618
1619static int gmc_v8_0_set_powergating_state(void *handle,
1620					  enum amd_powergating_state state)
1621{
1622	return 0;
1623}
1624
1625static void gmc_v8_0_get_clockgating_state(void *handle, u32 *flags)
1626{
1627	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1628	int data;
1629
1630	if (amdgpu_sriov_vf(adev))
1631		*flags = 0;
1632
1633	/* AMD_CG_SUPPORT_MC_MGCG */
1634	data = RREG32(mmMC_HUB_MISC_HUB_CG);
1635	if (data & MC_HUB_MISC_HUB_CG__ENABLE_MASK)
1636		*flags |= AMD_CG_SUPPORT_MC_MGCG;
1637
1638	/* AMD_CG_SUPPORT_MC_LS */
1639	if (data & MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK)
1640		*flags |= AMD_CG_SUPPORT_MC_LS;
1641}
1642
1643static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
1644	.name = "gmc_v8_0",
1645	.early_init = gmc_v8_0_early_init,
1646	.late_init = gmc_v8_0_late_init,
1647	.sw_init = gmc_v8_0_sw_init,
1648	.sw_fini = gmc_v8_0_sw_fini,
1649	.hw_init = gmc_v8_0_hw_init,
1650	.hw_fini = gmc_v8_0_hw_fini,
1651	.suspend = gmc_v8_0_suspend,
1652	.resume = gmc_v8_0_resume,
1653	.is_idle = gmc_v8_0_is_idle,
1654	.wait_for_idle = gmc_v8_0_wait_for_idle,
1655	.check_soft_reset = gmc_v8_0_check_soft_reset,
1656	.pre_soft_reset = gmc_v8_0_pre_soft_reset,
1657	.soft_reset = gmc_v8_0_soft_reset,
1658	.post_soft_reset = gmc_v8_0_post_soft_reset,
1659	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
1660	.set_powergating_state = gmc_v8_0_set_powergating_state,
1661	.get_clockgating_state = gmc_v8_0_get_clockgating_state,
1662};
1663
1664static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
1665	.flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
1666	.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
1667	.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
1668	.set_pte_pde = gmc_v8_0_set_pte_pde,
1669	.set_prt = gmc_v8_0_set_prt,
1670	.get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
1671	.get_vm_pde = gmc_v8_0_get_vm_pde
1672};
1673
1674static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
1675	.set = gmc_v8_0_vm_fault_interrupt_state,
1676	.process = gmc_v8_0_process_interrupt,
1677};
1678
1679static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
1680{
1681	if (adev->gmc.gmc_funcs == NULL)
1682		adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
1683}
1684
1685static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
1686{
1687	adev->gmc.vm_fault.num_types = 1;
1688	adev->gmc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
1689}
1690
1691const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
1692{
1693	.type = AMD_IP_BLOCK_TYPE_GMC,
1694	.major = 8,
1695	.minor = 0,
1696	.rev = 0,
1697	.funcs = &gmc_v8_0_ip_funcs,
1698};
1699
1700const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
1701{
1702	.type = AMD_IP_BLOCK_TYPE_GMC,
1703	.major = 8,
1704	.minor = 1,
1705	.rev = 0,
1706	.funcs = &gmc_v8_0_ip_funcs,
1707};
1708
1709const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
1710{
1711	.type = AMD_IP_BLOCK_TYPE_GMC,
1712	.major = 8,
1713	.minor = 5,
1714	.rev = 0,
1715	.funcs = &gmc_v8_0_ip_funcs,
1716};