/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "vid.h"
#include "vi.h"


static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("amdgpu/tonga_mc.bin");

static const u32 golden_settings_tonga_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_fiji_a10[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};


static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_fiji_a10,
						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_tonga_a11,
						 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	default:
		break;
	}
}

/**
 * gmc_v8_0_mc_wait_for_idle - wait for MC idle callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the MC (memory controller) to be idle (VI).
 * Returns 0 if the MC is idle, -1 if not.
 */
int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
		      struct amdgpu_mode_mc_save *save)
{
	u32 blackout;

	if (adev->mode_info.num_crtc)
		amdgpu_display_stop_mc_access(adev, save);

	amdgpu_asic_wait_for_mc_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}
	/* wait for the MC to settle */
	udelay(100);
}

void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
			struct amdgpu_mode_mc_save *save)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);

	if (adev->mode_info.num_crtc)
		amdgpu_display_resume_mc_access(adev, save);
}

/**
 * gmc_v8_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		return 0;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->mc.fw);

out:
	if (err) {
		printk(KERN_ERR
		       "mc: Failed to load firmware \"%s\"\n",
		       fw_name);
		release_firmware(adev->mc.fw);
		adev->mc.fw = NULL;
	}
	return err;
}

/**
 * gmc_v8_0_mc_load_microcode - load MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;

	if (!adev->mc.fw)
		return -EINVAL;

	/* Skip MC ucode loading on SR-IOV capable boards.
	 * vbios does this for us in asic_init in that case.
	 */
	if (adev->virtualization.supports_sr_iov)
		return 0;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}
	}

	return 0;
}

static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_mc *mc)
{
	if (mc->mc_vram_size > 0xFFC0000000ULL) {
		/* leave room for at least 1024M GTT */
		dev_warn(adev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xFFC0000000ULL;
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	amdgpu_vram_location(adev, &adev->mc, 0);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
}
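
/*
 * Illustrative note (editor's sketch, not driver code): the MC address
 * space is 40 bits (1 TB), and 0xFFC0000000 is (1ULL << 40) - (1ULL << 30),
 * i.e. 1 TB minus 1 GB.  Clamping mc_vram_size to that value in
 * gmc_v8_0_vram_gtt_location() above therefore always leaves at least
 * 1024 MB of the internal address space free for the GTT.
 */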

/**
 * gmc_v8_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (VI).
 */
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
	struct amdgpu_mode_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (adev->mode_info.num_crtc)
		amdgpu_display_set_vga_render_state(adev, false);

	gmc_v8_0_mc_stop(adev, &save);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->mc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->mc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(mmMC_VM_FB_LOCATION, tmp);
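	/*
	 * Worked example with hypothetical addresses (editor's note): for
	 * vram_start = 0 and vram_end = 0xFFFFFFFF (4 GB - 1), the shifts
	 * above yield base = 0x0000 and top = 0x00FF, so tmp = 0x00FF0000;
	 * both MC_VM_FB_LOCATION fields are in units of 16 MB (1 << 24).
	 */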
	/* XXX double check these! */
	WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
	WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	gmc_v8_0_mc_resume(adev, &save);

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v8_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	tmp = RREG32(mmMC_ARB_RAMCFG);
	if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(mmMC_SHARED_CHMAP);
	switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
	adev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* CONFIG_MEMSIZE reports the VRAM size in MB */
	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	/* In case the PCI BAR is larger than the actual amount of vram */
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;

	/* Unless the user has overridden it, set the GART size to
	 * 1024MB or the VRAM size, whichever is larger.
	 */
	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v8_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}
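
/*
 * Illustrative example (hypothetical board, editor's note): with
 * MC_ARB_RAMCFG.CHANSIZE set (64-bit channels) and
 * MC_SHARED_CHMAP.NOOFCHAN = 2 (4 channels), gmc_v8_0_mc_init() above
 * reports vram_width = 4 * 64 = 256 bits.
 */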

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v8_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (VI).
 */
static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* flush hdp cache */
	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);

	/* bits 0-15 are the VM contexts 0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

/**
 * gmc_v8_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev,
				     void *cpu_pt_addr,
				     uint32_t gpu_page_idx,
				     uint64_t addr,
				     uint32_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VI:
	 * 63:40 reserved
	 * 39:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 reserved
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VI:
	 * 63:59 block fragment size
	 * 58:40 reserved
	 * 39:1 physical base address of PTE
	 * bits 5:1 must be 0.
	 * 0 valid
	 */
	value = addr & 0x000000FFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}
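
/*
 * Illustrative sketch only (not used by the driver): how a PTE value like
 * the one written in gmc_v8_0_gart_set_pte_pde() above is packed from the
 * VI layout.  The helper name is hypothetical.
 */
static inline uint64_t gmc_v8_0_example_pack_pte(uint64_t page_base,
						 uint32_t flags)
{
	/* bits 39:12 carry the 4K page base, the low bits carry the flags */
	return (page_base & 0x000000FFFFFFF000ULL) | flags;
}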

/**
 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v8_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (VI).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;
	u32 tmp;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = RREG32(mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* XXX: set to enable PTE/PDE in system memory */
	tmp = RREG32(mmVM_L2_CNTL4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
	WREG32(mmVM_L2_CNTL4, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    amdgpu_vm_block_size - 9);
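	/*
	 * Worked example (editor's note, assuming the hardware field counts
	 * in units of 512 (1 << 9) pages, hence the "- 9"): a block size of
	 * 9 would program 0, i.e. one page table block per 512 4K pages
	 * (2 MB of address space).
	 */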
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v8_0_set_fault_enable_default(adev, false);
	else
		gmc_v8_0_set_fault_enable_default(adev, true);

	gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "VI PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
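	/*
	 * Sizing note (illustrative): each GART entry is 8 bytes, so e.g.
	 * a hypothetical 4 GB GTT of 4K pages (1 << 20 entries) needs an
	 * 8 MB page table in VRAM.
	 */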
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v8_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (VI).
 */
static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}

/**
 * gmc_v8_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (VI).
 */
static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

/*
 * vm
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
/**
 * gmc_v8_0_vm_init - vi vm init callback
 *
 * @adev: amdgpu_device pointer
 *
 * Inits VI specific vm parameters (number of VMs, base of vram for
 * VMIDs 1-15) (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_vm_init(struct amdgpu_device *adev)
{
	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else
		adev->vm_manager.vram_base_offset = 0;

	return 0;
}

/**
 * gmc_v8_0_vm_fini - vi vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down any asic specific VM setup (VI).
 */
static void gmc_v8_0_vm_fini(struct amdgpu_device *adev)
{
}

/**
 * gmc_v8_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 *
 * Print human readable fault information (VI).
 */
static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
	       protections, vmid, addr,
	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     MEMORY_CLIENT_RW) ?
	       "write" : "read", block, mc_client, mc_id);
}
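
/*
 * Illustrative example (hypothetical register value, editor's note):
 * mc_client = 0x43423000 unpacks byte by byte in
 * gmc_v8_0_vm_decode_fault() above to the string "CB0", naming the
 * offending memory client in the fault message.
 */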

static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_set_gart_funcs(adev);
	gmc_v8_0_set_irq_funcs(adev);

	return 0;
}

static int gmc_v8_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
	else
		return 0;
}

#define mmMC_SEQ_MISC0_FIJI 0xA71

static int gmc_v8_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp;

		if (adev->asic_type == CHIP_FIJI)
			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
		else
			tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size is 40 bits.
	 */
	adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
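	/*
	 * Worked example (editor's note): amdgpu_vm_size is in GB and
	 * 1 GB is 1 << 18 4K pages, so e.g. amdgpu_vm_size = 4 gives
	 * max_pfn = 4 << 18 = 1 << 20 pages, the 4GB noted above.
	 */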

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits.
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v8_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v8_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v8_0_gart_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v8_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static int gmc_v8_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		amdgpu_vm_manager_fini(adev);
		gmc_v8_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v8_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static int gmc_v8_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_init_golden_registers(adev);

	gmc_v8_0_mc_program(adev);

	if (adev->asic_type == CHIP_TONGA) {
		r = gmc_v8_0_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v8_0_gart_enable(adev);
	if (r)
		return r;

	return r;
}

static int gmc_v8_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v8_0_gart_disable(adev);

	return 0;
}

static int gmc_v8_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		gmc_v8_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v8_0_hw_fini(adev);

	return 0;
}

static int gmc_v8_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v8_0_hw_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v8_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static bool gmc_v8_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v8_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static void gmc_v8_0_print_status(void *handle)
{
	int i, j;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "GMC 8.x registers\n");
	dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
		RREG32(mmSRBM_STATUS));
	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
		RREG32(mmSRBM_STATUS2));

	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
	dev_info(adev->dev, "  MC_VM_MX_L1_TLB_CNTL=0x%08X\n",
		 RREG32(mmMC_VM_MX_L1_TLB_CNTL));
	dev_info(adev->dev, "  VM_L2_CNTL=0x%08X\n",
		 RREG32(mmVM_L2_CNTL));
	dev_info(adev->dev, "  VM_L2_CNTL2=0x%08X\n",
		 RREG32(mmVM_L2_CNTL2));
	dev_info(adev->dev, "  VM_L2_CNTL3=0x%08X\n",
		 RREG32(mmVM_L2_CNTL3));
	dev_info(adev->dev, "  VM_L2_CNTL4=0x%08X\n",
		 RREG32(mmVM_L2_CNTL4));
	dev_info(adev->dev, "  VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_CNTL2=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_CNTL2));
	dev_info(adev->dev, "  VM_CONTEXT0_CNTL=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_CNTL));
	dev_info(adev->dev, "  VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR=0x%08X\n",
		 RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR));
	dev_info(adev->dev, "  VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR=0x%08X\n",
		 RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR));
	dev_info(adev->dev, "  mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET=0x%08X\n",
		 RREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET));
	dev_info(adev->dev, "  VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_CNTL2=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_CNTL2));
	dev_info(adev->dev, "  VM_CONTEXT1_CNTL=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_CNTL));
	for (i = 0; i < 16; i++) {
		if (i < 8)
			dev_info(adev->dev, "  VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
				 i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i));
		else
			dev_info(adev->dev, "  VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
				 i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8));
	}
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR));
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR));
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR));
	dev_info(adev->dev, "  MC_VM_FB_LOCATION=0x%08X\n",
		 RREG32(mmMC_VM_FB_LOCATION));
	dev_info(adev->dev, "  MC_VM_AGP_BASE=0x%08X\n",
		 RREG32(mmMC_VM_AGP_BASE));
	dev_info(adev->dev, "  MC_VM_AGP_TOP=0x%08X\n",
		 RREG32(mmMC_VM_AGP_TOP));
	dev_info(adev->dev, "  MC_VM_AGP_BOT=0x%08X\n",
		 RREG32(mmMC_VM_AGP_BOT));

	dev_info(adev->dev, "  HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n",
		 RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL));
	dev_info(adev->dev, "  HDP_NONSURFACE_BASE=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_BASE));
	dev_info(adev->dev, "  HDP_NONSURFACE_INFO=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_INFO));
	dev_info(adev->dev, "  HDP_NONSURFACE_SIZE=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_SIZE));
	dev_info(adev->dev, "  HDP_MISC_CNTL=0x%08X\n",
		 RREG32(mmHDP_MISC_CNTL));
	dev_info(adev->dev, "  HDP_HOST_PATH_CNTL=0x%08X\n",
		 RREG32(mmHDP_HOST_PATH_CNTL));

	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		dev_info(adev->dev, "  %d:\n", i);
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb05 + j, RREG32(0xb05 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb06 + j, RREG32(0xb06 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb07 + j, RREG32(0xb07 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb08 + j, RREG32(0xb08 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb09 + j, RREG32(0xb09 + j));
	}

	dev_info(adev->dev, "  BIF_FB_EN=0x%08X\n",
		 RREG32(mmBIF_FB_EN));
}

static int gmc_v8_0_soft_reset(void *handle)
{
	struct amdgpu_mode_mc_save save;
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v8_0_print_status((void *)adev);

		gmc_v8_0_mc_stop(adev, &save);
		if (gmc_v8_0_wait_for_idle(adev)) {
			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
		}

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		gmc_v8_0_mc_resume(adev, &save);
		udelay(50);

		gmc_v8_0_print_status((void *)adev);
	}

	return 0;
}

static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v8_0_set_fault_enable_default(adev, false);

	dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
		entry->src_id, entry->src_data);
	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		addr);
	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		status);
	gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);

	return 0;
}

static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
		bool enable)
{
	uint32_t data;

	if (enable) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
		bool enable)
{
	uint32_t data;

	if (enable) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

static int gmc_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		fiji_update_mc_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		fiji_update_mc_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int gmc_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
	.early_init = gmc_v8_0_early_init,
	.late_init = gmc_v8_0_late_init,
	.sw_init = gmc_v8_0_sw_init,
	.sw_fini = gmc_v8_0_sw_fini,
	.hw_init = gmc_v8_0_hw_init,
	.hw_fini = gmc_v8_0_hw_fini,
	.suspend = gmc_v8_0_suspend,
	.resume = gmc_v8_0_resume,
	.is_idle = gmc_v8_0_is_idle,
	.wait_for_idle = gmc_v8_0_wait_for_idle,
	.soft_reset = gmc_v8_0_soft_reset,
	.print_status = gmc_v8_0_print_status,
	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
	.set_powergating_state = gmc_v8_0_set_powergating_state,
};

static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v8_0_gart_set_pte_pde,
};

static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
	.set = gmc_v8_0_vm_fault_interrupt_state,
	.process = gmc_v8_0_process_interrupt,
};

static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v8_0_gart_funcs;
}

static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}