v4.6 (drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c)
   1/*
   2 * Copyright 2014 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23#include <linux/firmware.h>
  24#include "drmP.h"
  25#include "amdgpu.h"
  26#include "gmc_v8_0.h"
  27#include "amdgpu_ucode.h"
  28
  29#include "gmc/gmc_8_1_d.h"
  30#include "gmc/gmc_8_1_sh_mask.h"
  31
  32#include "bif/bif_5_0_d.h"
  33#include "bif/bif_5_0_sh_mask.h"
  34
  35#include "oss/oss_3_0_d.h"
  36#include "oss/oss_3_0_sh_mask.h"
  37
  38#include "vid.h"
  39#include "vi.h"
  40
  41
  42static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
  43static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
  44
  45MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
  46
  47static const u32 golden_settings_tonga_a11[] =
  48{
  49	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
  50	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
  51	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
  52	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  53	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  54	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  55	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  56};
  57
  58static const u32 tonga_mgcg_cgcg_init[] =
  59{
  60	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
  61};
  62
  63static const u32 golden_settings_fiji_a10[] =
  64{
  65	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  66	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  67	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  68	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  69};
  70
  71static const u32 fiji_mgcg_cgcg_init[] =
  72{
  73	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
  74};
  75
  76static const u32 cz_mgcg_cgcg_init[] =
  77{
  78	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
  79};
  80
  81static const u32 stoney_mgcg_cgcg_init[] =
  82{
  83	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
  84};
  85
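/* Editor's note: the golden/clockgating tables above are consumed three
 * u32s at a time as { register offset, and_mask, or_value } triplets by
 * amdgpu_program_register_sequence().  A minimal sketch of the assumed
 * read-modify-write, for illustration only (the real helper lives in
 * amdgpu_device.c):
 *
 *	for (i = 0; i < array_size; i += 3) {
 *		u32 reg = regs[i + 0], and_mask = regs[i + 1], or_val = regs[i + 2];
 *		u32 tmp = (and_mask == 0xffffffff) ? or_val :
 *			  ((RREG32(reg) & ~and_mask) | or_val);
 *		WREG32(reg, tmp);
 *	}
 */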
  86
  87static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
  88{
  89	switch (adev->asic_type) {
  90	case CHIP_FIJI:
  91		amdgpu_program_register_sequence(adev,
  92						 fiji_mgcg_cgcg_init,
  93						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
  94		amdgpu_program_register_sequence(adev,
  95						 golden_settings_fiji_a10,
  96						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
  97		break;
  98	case CHIP_TONGA:
  99		amdgpu_program_register_sequence(adev,
 100						 tonga_mgcg_cgcg_init,
 101						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
 102		amdgpu_program_register_sequence(adev,
 103						 golden_settings_tonga_a11,
 104						 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
 105		break;
 106	case CHIP_CARRIZO:
 107		amdgpu_program_register_sequence(adev,
 108						 cz_mgcg_cgcg_init,
 109						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
 110		break;
 111	case CHIP_STONEY:
 112		amdgpu_program_register_sequence(adev,
 113						 stoney_mgcg_cgcg_init,
 114						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
 115		break;
 116	default:
 117		break;
 118	}
 119}
 120
 121/**
 122 * gmc_v8_0_mc_wait_for_idle - wait for MC idle callback.
 123 *
 124 * @adev: amdgpu_device pointer
 125 *
 126 * Wait for the MC (memory controller) to be idle
 127 * (VI).
 128 * Returns 0 if the MC is idle, -1 if not.
 129 */
 130int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev)
 131{
 132	unsigned i;
 133	u32 tmp;
 134
 135	for (i = 0; i < adev->usec_timeout; i++) {
 136		/* read MC_STATUS */
 137		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__VMC_BUSY_MASK |
 138					       SRBM_STATUS__MCB_BUSY_MASK |
 139					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
 140					       SRBM_STATUS__MCC_BUSY_MASK |
 141					       SRBM_STATUS__MCD_BUSY_MASK |
 142					       SRBM_STATUS__VMC1_BUSY_MASK);
 143		if (!tmp)
 144			return 0;
 145		udelay(1);
 146	}
 147	return -1;
 148}
 149
 150void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
 151		      struct amdgpu_mode_mc_save *save)
 152{
 153	u32 blackout;
 154
 155	if (adev->mode_info.num_crtc)
 156		amdgpu_display_stop_mc_access(adev, save);
 157
 158	amdgpu_asic_wait_for_mc_idle(adev);
 159
 160	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
 161	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
 162		/* Block CPU access */
 163		WREG32(mmBIF_FB_EN, 0);
 164		/* blackout the MC */
 165		blackout = REG_SET_FIELD(blackout,
 166					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
 167		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
 168	}
 169	/* wait for the MC to settle */
 170	udelay(100);
 171}
 172
 173void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
 174			struct amdgpu_mode_mc_save *save)
 175{
 176	u32 tmp;
 177
 178	/* unblackout the MC */
 179	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
 180	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
 181	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
 182	/* allow CPU access */
 183	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
 184	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
 185	WREG32(mmBIF_FB_EN, tmp);
 186
 187	if (adev->mode_info.num_crtc)
 188		amdgpu_display_resume_mc_access(adev, save);
 189}
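/* Editor's note: gmc_v8_0_mc_stop()/gmc_v8_0_mc_resume() above lean on the
 * REG_GET_FIELD()/REG_SET_FIELD() helpers, which expand from the
 * <REG>__<FIELD>_MASK and <REG>__<FIELD>__SHIFT constants generated in the
 * *_sh_mask.h headers.  A self-contained sketch of the pattern; the
 * SKETCH_ names are illustrative stand-ins, not the amdgpu.h definitions:
 */
#define SKETCH_FIELD_SHIFT(reg, field)	reg##__##field##__SHIFT
#define SKETCH_FIELD_MASK(reg, field)	reg##__##field##_MASK
#define SKETCH_SET_FIELD(val, reg, field, fv)				\
	(((val) & ~SKETCH_FIELD_MASK(reg, field)) |			\
	 (SKETCH_FIELD_MASK(reg, field) &				\
	  ((fv) << SKETCH_FIELD_SHIFT(reg, field))))
/* e.g. REG_SET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1)
 * clears the field's bits in blackout and ORs in 1 at the field's shift. */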
 190
 191/**
 192 * gmc_v8_0_init_microcode - load ucode images from disk
 193 *
 194 * @adev: amdgpu_device pointer
 195 *
 196 * Use the firmware interface to load the ucode images into
 197 * the driver (not loaded into hw).
 198 * Returns 0 on success, error on failure.
 199 */
 200static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
 201{
 202	const char *chip_name;
 203	char fw_name[30];
 204	int err;
 205
 206	DRM_DEBUG("\n");
 207
 208	switch (adev->asic_type) {
 209	case CHIP_TONGA:
 210		chip_name = "tonga";
 211		break;
 212	case CHIP_FIJI:
 213	case CHIP_CARRIZO:
 214	case CHIP_STONEY:
 215		return 0;
 216	default: BUG();
 217	}
 218
 219	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
 220	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
 221	if (err)
 222		goto out;
 223	err = amdgpu_ucode_validate(adev->mc.fw);
 224
 225out:
 226	if (err) {
 227		printk(KERN_ERR
 228		       "mc: Failed to load firmware \"%s\"\n",
 229		       fw_name);
 230		release_firmware(adev->mc.fw);
 231		adev->mc.fw = NULL;
 232	}
 233	return err;
 234}
 235
 236/**
 237 * gmc_v8_0_mc_load_microcode - load MC ucode into the hw
 238 *
 239 * @adev: amdgpu_device pointer
 240 *
 241 * Load the GDDR MC ucode into the hw (CIK).
 242 * Returns 0 on success, error on failure.
 243 */
 244static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
 245{
 246	const struct mc_firmware_header_v1_0 *hdr;
 247	const __le32 *fw_data = NULL;
 248	const __le32 *io_mc_regs = NULL;
 249	u32 running, blackout = 0;
 250	int i, ucode_size, regs_size;
 251
 252	if (!adev->mc.fw)
 253		return -EINVAL;
 254
 255	/* Skip MC ucode loading on SR-IOV capable boards.
 256	 * vbios does this for us in asic_init in that case.
 257	 */
 258	if (adev->virtualization.supports_sr_iov)
 259		return 0;
 260
 261	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
 262	amdgpu_ucode_print_mc_hdr(&hdr->header);
 263
 264	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
 265	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
 266	io_mc_regs = (const __le32 *)
 267		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
 268	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 269	fw_data = (const __le32 *)
 270		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 271
 272	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
 273
  274	if (running == 0) {
  275		if (running) {	/* always false here; this blackout path is dead code */
 276			blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
 277			WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
 278		}
 279
 280		/* reset the engine and set to writable */
 281		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
 282		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
 283
 284		/* load mc io regs */
 285		for (i = 0; i < regs_size; i++) {
 286			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
 287			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
 288		}
 289		/* load the MC ucode */
 290		for (i = 0; i < ucode_size; i++)
 291			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));
 292
 293		/* put the engine back into the active state */
 294		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
 295		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
 296		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
 297
 298		/* wait for training to complete */
 299		for (i = 0; i < adev->usec_timeout; i++) {
 300			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
 301					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
 302				break;
 303			udelay(1);
 304		}
 305		for (i = 0; i < adev->usec_timeout; i++) {
 306			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
 307					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
 308				break;
 309			udelay(1);
 310		}
 311
  312		if (running)	/* likewise always false here */
  313			WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
 314	}
 315
 316	return 0;
 317}
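/* Editor's note: per the header fields consumed above, the MC firmware
 * blob appears to be laid out as a struct mc_firmware_header_v1_0
 * followed, at io_debug_array_offset_bytes, by regs_size little-endian
 * (MC_SEQ_IO_DEBUG_INDEX, MC_SEQ_IO_DEBUG_DATA) pairs -- hence
 * regs_size = io_debug_size_bytes / (4 * 2) -- and, at
 * ucode_array_offset_bytes, by ucode_size 32-bit ucode words.  A
 * hypothetical view of one register pair, for illustration only:
 */
struct sketch_mc_io_debug_pair {
	__le32 index;	/* value written to mmMC_SEQ_IO_DEBUG_INDEX */
	__le32 data;	/* value written to mmMC_SEQ_IO_DEBUG_DATA */
};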
 318
 319static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
 320				       struct amdgpu_mc *mc)
 321{
 322	if (mc->mc_vram_size > 0xFFC0000000ULL) {
 323		/* leave room for at least 1024M GTT */
 324		dev_warn(adev->dev, "limiting VRAM\n");
 325		mc->real_vram_size = 0xFFC0000000ULL;
 326		mc->mc_vram_size = 0xFFC0000000ULL;
 327	}
 328	amdgpu_vram_location(adev, &adev->mc, 0);
 329	adev->mc.gtt_base_align = 0;
 330	amdgpu_gtt_location(adev, mc);
 331}
 332
 333/**
 334 * gmc_v8_0_mc_program - program the GPU memory controller
 335 *
 336 * @adev: amdgpu_device pointer
 337 *
 338 * Set the location of vram, gart, and AGP in the GPU's
 339 * physical address space (CIK).
 340 */
 341static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
 342{
 343	struct amdgpu_mode_mc_save save;
 344	u32 tmp;
 345	int i, j;
 346
 347	/* Initialize HDP */
 348	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
 349		WREG32((0xb05 + j), 0x00000000);
 350		WREG32((0xb06 + j), 0x00000000);
 351		WREG32((0xb07 + j), 0x00000000);
 352		WREG32((0xb08 + j), 0x00000000);
 353		WREG32((0xb09 + j), 0x00000000);
 354	}
 355	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
 356
 357	if (adev->mode_info.num_crtc)
 358		amdgpu_display_set_vga_render_state(adev, false);
 359
 360	gmc_v8_0_mc_stop(adev, &save);
 361	if (amdgpu_asic_wait_for_mc_idle(adev)) {
  362		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
 363	}
 364	/* Update configuration */
 365	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
 366	       adev->mc.vram_start >> 12);
 367	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
 368	       adev->mc.vram_end >> 12);
 369	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
 370	       adev->vram_scratch.gpu_addr >> 12);
 371	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
 372	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
 373	WREG32(mmMC_VM_FB_LOCATION, tmp);
 374	/* XXX double check these! */
 375	WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
 376	WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
 377	WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
 378	WREG32(mmMC_VM_AGP_BASE, 0);
 379	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
 380	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
 381	if (amdgpu_asic_wait_for_mc_idle(adev)) {
  382		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
 383	}
 384	gmc_v8_0_mc_resume(adev, &save);
 385
 386	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
 387
 388	tmp = RREG32(mmHDP_MISC_CNTL);
 389	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
 390	WREG32(mmHDP_MISC_CNTL, tmp);
 391
 392	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
 393	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
 394}
 395
 396/**
 397 * gmc_v8_0_mc_init - initialize the memory controller driver params
 398 *
 399 * @adev: amdgpu_device pointer
 400 *
 401 * Look up the amount of vram, vram width, and decide how to place
 402 * vram and gart within the GPU's physical address space (CIK).
 403 * Returns 0 for success.
 404 */
 405static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
 406{
 407	u32 tmp;
 408	int chansize, numchan;
 409
  410	/* Get VRAM information */
 411	tmp = RREG32(mmMC_ARB_RAMCFG);
 412	if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
 413		chansize = 64;
 414	} else {
 415		chansize = 32;
 416	}
 417	tmp = RREG32(mmMC_SHARED_CHMAP);
 418	switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
 419	case 0:
 420	default:
 421		numchan = 1;
 422		break;
 423	case 1:
 424		numchan = 2;
 425		break;
 426	case 2:
 427		numchan = 4;
 428		break;
 429	case 3:
 430		numchan = 8;
 431		break;
 432	case 4:
 433		numchan = 3;
 434		break;
 435	case 5:
 436		numchan = 6;
 437		break;
 438	case 6:
 439		numchan = 10;
 440		break;
 441	case 7:
 442		numchan = 12;
 443		break;
 444	case 8:
 445		numchan = 16;
 446		break;
 447	}
 448	adev->mc.vram_width = numchan * chansize;
  449	/* Could the aperture size ever report 0? */
 450	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
 451	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
  452	/* CONFIG_MEMSIZE reports the VRAM size in MB */
 453	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 454	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 455	adev->mc.visible_vram_size = adev->mc.aper_size;
 456
 457	/* In case the PCI BAR is larger than the actual amount of vram */
 458	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
 459		adev->mc.visible_vram_size = adev->mc.real_vram_size;
 460
  461	/* unless the user has overridden it, set the gart
  462	 * size to 1024 MB or the vram size, whichever is larger.
  463	 */
 464	if (amdgpu_gart_size == -1)
 465		adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
 466	else
 467		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
 468
 469	gmc_v8_0_vram_gtt_location(adev, &adev->mc);
 470
 471	return 0;
 472}
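/* Editor's note: a worked instance of the bus-width math above, with
 * illustrative register values rather than a statement about any real
 * SKU: MC_SHARED_CHMAP.NOOFCHAN == 3 selects numchan = 8 and a set
 * MC_ARB_RAMCFG.CHANSIZE selects chansize = 64, giving
 * vram_width = 8 * 64 = 512 bits; NOOFCHAN == 1 (2 channels) with
 * 32-bit channels would give 2 * 32 = 64 bits.
 */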
 473
 474/*
 475 * GART
 476 * VMID 0 is the physical GPU addresses as used by the kernel.
 477 * VMIDs 1-15 are used for userspace clients and are handled
 478 * by the amdgpu vm/hsa code.
 479 */
 480
 481/**
 482 * gmc_v8_0_gart_flush_gpu_tlb - gart tlb flush callback
 483 *
 484 * @adev: amdgpu_device pointer
 485 * @vmid: vm instance to flush
 486 *
 487 * Flush the TLB for the requested page table (CIK).
 488 */
 489static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
 490					uint32_t vmid)
 491{
 492	/* flush hdp cache */
 493	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
 494
  495	/* bits 0-15 are the VM contexts 0-15 */
 496	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 497}
 498
 499/**
 500 * gmc_v8_0_gart_set_pte_pde - update the page tables using MMIO
 501 *
 502 * @adev: amdgpu_device pointer
 503 * @cpu_pt_addr: cpu address of the page table
 504 * @gpu_page_idx: entry in the page table to update
 505 * @addr: dst addr to write into pte/pde
 506 * @flags: access flags
 507 *
 508 * Update the page tables using the CPU.
 509 */
 510static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev,
 511				     void *cpu_pt_addr,
 512				     uint32_t gpu_page_idx,
 513				     uint64_t addr,
 514				     uint32_t flags)
 515{
 516	void __iomem *ptr = (void *)cpu_pt_addr;
 517	uint64_t value;
 518
 519	/*
 520	 * PTE format on VI:
 521	 * 63:40 reserved
 522	 * 39:12 4k physical page base address
 523	 * 11:7 fragment
 524	 * 6 write
 525	 * 5 read
 526	 * 4 exe
 527	 * 3 reserved
 528	 * 2 snooped
 529	 * 1 system
 530	 * 0 valid
 531	 *
 532	 * PDE format on VI:
 533	 * 63:59 block fragment size
 534	 * 58:40 reserved
 535	 * 39:1 physical base address of PTE
 536	 * bits 5:1 must be 0.
 537	 * 0 valid
 538	 */
 539	value = addr & 0x000000FFFFFFF000ULL;
 540	value |= flags;
 541	writeq(value, ptr + (gpu_page_idx * 8));
 542
 543	return 0;
 544}
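/* Editor's note: a worked instance of the VI PTE layout documented in
 * gmc_v8_0_gart_set_pte_pde() above, assuming the flag bits follow the
 * positions listed there (valid = bit 0, read = bit 5, write = bit 6);
 * illustrative only, not a helper the driver defines:
 */
static inline uint64_t sketch_build_example_pte(void)
{
	uint64_t page  = 0x0000001234567000ULL;	/* 4k-aligned physical addr */
	uint64_t flags = (1ULL << 6) | (1ULL << 5) | (1ULL << 0); /* w|r|valid */

	return (page & 0x000000FFFFFFF000ULL) | flags;	/* == 0x1234567061 */
}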
 545
 546/**
 547 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 548 *
 549 * @adev: amdgpu_device pointer
 550 * @value: true redirects VM faults to the default page
 551 */
 552static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
 553					      bool value)
 554{
 555	u32 tmp;
 556
 557	tmp = RREG32(mmVM_CONTEXT1_CNTL);
 558	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 559			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 560	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 561			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 562	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 563			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 564	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 565			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 566	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 567			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 568	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 569			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 570	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 571			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 572	WREG32(mmVM_CONTEXT1_CNTL, tmp);
 573}
 574
 575/**
 576 * gmc_v8_0_gart_enable - gart enable
 577 *
 578 * @adev: amdgpu_device pointer
 579 *
 580 * This sets up the TLBs, programs the page tables for VMID0,
 581 * sets up the hw for VMIDs 1-15 which are allocated on
 582 * demand, and sets up the global locations for the LDS, GDS,
 583 * and GPUVM for FSA64 clients (CIK).
 584 * Returns 0 for success, errors for failure.
 585 */
 586static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 587{
 588	int r, i;
 589	u32 tmp;
 590
 591	if (adev->gart.robj == NULL) {
 592		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
 593		return -EINVAL;
 594	}
 595	r = amdgpu_gart_table_vram_pin(adev);
 596	if (r)
 597		return r;
 598	/* Setup TLB control */
 599	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
 600	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
 601	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
 602	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
 603	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
 604	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
 605	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
 606	/* Setup L2 cache */
 607	tmp = RREG32(mmVM_L2_CNTL);
 608	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
 609	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
 610	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
 611	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
 612	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
 613	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
 614	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
 615	WREG32(mmVM_L2_CNTL, tmp);
 616	tmp = RREG32(mmVM_L2_CNTL2);
 617	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
 618	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
 619	WREG32(mmVM_L2_CNTL2, tmp);
 620	tmp = RREG32(mmVM_L2_CNTL3);
 621	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
 622	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
 623	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
 624	WREG32(mmVM_L2_CNTL3, tmp);
 625	/* XXX: set to enable PTE/PDE in system memory */
 626	tmp = RREG32(mmVM_L2_CNTL4);
 627	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
 628	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
 629	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
 630	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
 631	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
 632	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
 633	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
 634	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
 635	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
 636	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
 637	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
 638	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
 639	WREG32(mmVM_L2_CNTL4, tmp);
 640	/* setup context0 */
 641	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
 642	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
 643	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
 644	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 645			(u32)(adev->dummy_page.addr >> 12));
 646	WREG32(mmVM_CONTEXT0_CNTL2, 0);
 647	tmp = RREG32(mmVM_CONTEXT0_CNTL);
 648	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
 649	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
 650	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 651	WREG32(mmVM_CONTEXT0_CNTL, tmp);
 652
 653	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
 654	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
 655	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);
 656
 657	/* empty context1-15 */
  658	/* FIXME: start with 4GB; once 2-level page tables are in
  659	 * use, switch to the full vm size space
  660	 */
 661	/* set vm size, must be a multiple of 4 */
 662	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
 663	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
 664	for (i = 1; i < 16; i++) {
 665		if (i < 8)
 666			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
 667			       adev->gart.table_addr >> 12);
 668		else
 669			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
 670			       adev->gart.table_addr >> 12);
 671	}
 672
 673	/* enable context1-15 */
 674	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
 675	       (u32)(adev->dummy_page.addr >> 12));
 676	WREG32(mmVM_CONTEXT1_CNTL2, 4);
 677	tmp = RREG32(mmVM_CONTEXT1_CNTL);
 678	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
 679	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
 680	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 681	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 682	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 683	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 684	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 685	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 686	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 687	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
 688			    amdgpu_vm_block_size - 9);
 689	WREG32(mmVM_CONTEXT1_CNTL, tmp);
 690	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
 691		gmc_v8_0_set_fault_enable_default(adev, false);
 692	else
 693		gmc_v8_0_set_fault_enable_default(adev, true);
 694
 695	gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
 696	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
 697		 (unsigned)(adev->mc.gtt_size >> 20),
 698		 (unsigned long long)adev->gart.table_addr);
 699	adev->gart.ready = true;
 700	return 0;
 701}
 702
 703static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
 704{
 705	int r;
 706
 707	if (adev->gart.robj) {
  708		WARN(1, "PCIE GART already initialized\n");
 709		return 0;
 710	}
 711	/* Initialize common gart structure */
 712	r = amdgpu_gart_init(adev);
 713	if (r)
 714		return r;
 715	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
 716	return amdgpu_gart_table_vram_alloc(adev);
 717}
 718
 719/**
 720 * gmc_v8_0_gart_disable - gart disable
 721 *
 722 * @adev: amdgpu_device pointer
 723 *
  724 * This disables all VM page tables (CIK).
 725 */
 726static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
 727{
 728	u32 tmp;
 729
 730	/* Disable all tables */
 731	WREG32(mmVM_CONTEXT0_CNTL, 0);
 732	WREG32(mmVM_CONTEXT1_CNTL, 0);
 733	/* Setup TLB control */
 734	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
 735	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
 736	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
 737	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
 738	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
 739	/* Setup L2 cache */
 740	tmp = RREG32(mmVM_L2_CNTL);
 741	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
 742	WREG32(mmVM_L2_CNTL, tmp);
 743	WREG32(mmVM_L2_CNTL2, 0);
 744	amdgpu_gart_table_vram_unpin(adev);
 745}
 746
 747/**
 748 * gmc_v8_0_gart_fini - vm fini callback
 749 *
 750 * @adev: amdgpu_device pointer
 751 *
 752 * Tears down the driver GART/VM setup (CIK).
 753 */
 754static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
 755{
 756	amdgpu_gart_table_vram_free(adev);
 757	amdgpu_gart_fini(adev);
 758}
 759
 760/*
 761 * vm
 762 * VMID 0 is the physical GPU addresses as used by the kernel.
 763 * VMIDs 1-15 are used for userspace clients and are handled
 764 * by the amdgpu vm/hsa code.
 765 */
 766/**
 767 * gmc_v8_0_vm_init - cik vm init callback
 768 *
 769 * @adev: amdgpu_device pointer
 770 *
 771 * Inits cik specific vm parameters (number of VMs, base of vram for
 772 * VMIDs 1-15) (CIK).
 773 * Returns 0 for success.
 774 */
 775static int gmc_v8_0_vm_init(struct amdgpu_device *adev)
 776{
 777	/*
 778	 * number of VMs
 779	 * VMID 0 is reserved for System
 780	 * amdgpu graphics/compute will use VMIDs 1-7
 781	 * amdkfd will use VMIDs 8-15
 782	 */
 783	adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
 784	amdgpu_vm_manager_init(adev);
 785
 786	/* base offset of vram pages */
 787	if (adev->flags & AMD_IS_APU) {
 788		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
 789		tmp <<= 22;
 790		adev->vm_manager.vram_base_offset = tmp;
 791	} else
 792		adev->vm_manager.vram_base_offset = 0;
 793
 794	return 0;
 795}
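/* Editor's note: MC_VM_FB_OFFSET is shifted left by 22 in gmc_v8_0_vm_init()
 * above, i.e. the register apparently holds the APU framebuffer offset in
 * 4 MB (2^22 byte) units; a raw value of 0x40, for example, would yield
 * vram_base_offset = 0x40 << 22 = 0x10000000 (256 MB).  This is a reading
 * of the code, not a register-spec citation.
 */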
 796
 797/**
 798 * gmc_v8_0_vm_fini - cik vm fini callback
 799 *
 800 * @adev: amdgpu_device pointer
 801 *
 802 * Tear down any asic specific VM setup (CIK).
 803 */
 804static void gmc_v8_0_vm_fini(struct amdgpu_device *adev)
 805{
 806}
 807
 808/**
 809 * gmc_v8_0_vm_decode_fault - print human readable fault info
 810 *
 811 * @adev: amdgpu_device pointer
 812 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 813 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 814 *
 815 * Print human readable fault information (CIK).
 816 */
 817static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev,
 818				     u32 status, u32 addr, u32 mc_client)
 819{
 820	u32 mc_id;
 821	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
 822	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 823					PROTECTIONS);
 824	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
 825		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
 826
 827	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 828			      MEMORY_CLIENT_ID);
 829
 830	printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
 831	       protections, vmid, addr,
 832	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 833			     MEMORY_CLIENT_RW) ?
 834	       "write" : "read", block, mc_client, mc_id);
 835}
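/* Editor's note: block[] in gmc_v8_0_vm_decode_fault() above unpacks the
 * 32-bit MCCLIENT value as four big-endian ASCII bytes; a hypothetical
 * mc_client of 0x43423030 would decode to the string "CB00".
 */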
 836
 837static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
 838{
 839	switch (mc_seq_vram_type) {
 840	case MC_SEQ_MISC0__MT__GDDR1:
 841		return AMDGPU_VRAM_TYPE_GDDR1;
 842	case MC_SEQ_MISC0__MT__DDR2:
 843		return AMDGPU_VRAM_TYPE_DDR2;
 844	case MC_SEQ_MISC0__MT__GDDR3:
 845		return AMDGPU_VRAM_TYPE_GDDR3;
 846	case MC_SEQ_MISC0__MT__GDDR4:
 847		return AMDGPU_VRAM_TYPE_GDDR4;
 848	case MC_SEQ_MISC0__MT__GDDR5:
 849		return AMDGPU_VRAM_TYPE_GDDR5;
 850	case MC_SEQ_MISC0__MT__HBM:
 851		return AMDGPU_VRAM_TYPE_HBM;
 852	case MC_SEQ_MISC0__MT__DDR3:
 853		return AMDGPU_VRAM_TYPE_DDR3;
 854	default:
 855		return AMDGPU_VRAM_TYPE_UNKNOWN;
 856	}
 857}
 858
 859static int gmc_v8_0_early_init(void *handle)
 860{
 861	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 862
 863	gmc_v8_0_set_gart_funcs(adev);
 864	gmc_v8_0_set_irq_funcs(adev);
 865
 866	return 0;
 867}
 868
 869static int gmc_v8_0_late_init(void *handle)
 870{
 871	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 872
 873	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
 874		return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
 875	else
 876		return 0;
 877}
 878
 879#define mmMC_SEQ_MISC0_FIJI 0xA71
 880
 881static int gmc_v8_0_sw_init(void *handle)
 882{
 883	int r;
 884	int dma_bits;
 885	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 886
 887	if (adev->flags & AMD_IS_APU) {
 888		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
 889	} else {
 890		u32 tmp;
 891
 892		if (adev->asic_type == CHIP_FIJI)
 893			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
 894		else
 895			tmp = RREG32(mmMC_SEQ_MISC0);
 896		tmp &= MC_SEQ_MISC0__MT__MASK;
 897		adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
 898	}
 899
 900	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
 901	if (r)
 902		return r;
 903
 904	r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
 905	if (r)
 906		return r;
 907
  908	/* Adjust VM size here. amdgpu_vm_size is in GB and
  909	 * 1 GB = 2^18 4k pages, so e.g. 4GB gives (1 << 20) 4k pages.
  910	 * Max GPUVM size for VI is 40 bits.
  911	 */
 912	adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
 913
 914	/* Set the internal MC address mask
 915	 * This is the max address of the GPU's
 916	 * internal address space.
 917	 */
 918	adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
 919
 920	/* set DMA mask + need_dma32 flags.
 921	 * PCIE - can handle 40-bits.
 922	 * IGP - can handle 40-bits
 923	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
 924	 */
 925	adev->need_dma32 = false;
 926	dma_bits = adev->need_dma32 ? 32 : 40;
 927	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
 928	if (r) {
 929		adev->need_dma32 = true;
 930		dma_bits = 32;
 931		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
 932	}
 933	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
 934	if (r) {
 935		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
 936		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
 937	}
 938
 939	r = gmc_v8_0_init_microcode(adev);
 940	if (r) {
 941		DRM_ERROR("Failed to load mc firmware!\n");
 942		return r;
 943	}
 944
 945	r = gmc_v8_0_mc_init(adev);
 946	if (r)
 947		return r;
 948
 949	/* Memory manager */
 950	r = amdgpu_bo_init(adev);
 951	if (r)
 952		return r;
 953
 954	r = gmc_v8_0_gart_init(adev);
 955	if (r)
 956		return r;
 957
 958	if (!adev->vm_manager.enabled) {
 959		r = gmc_v8_0_vm_init(adev);
 960		if (r) {
 961			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
 962			return r;
 963		}
 964		adev->vm_manager.enabled = true;
 965	}
 966
 967	return r;
 968}
 969
 970static int gmc_v8_0_sw_fini(void *handle)
 971{
 972	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 973
 974	if (adev->vm_manager.enabled) {
 975		amdgpu_vm_manager_fini(adev);
 976		gmc_v8_0_vm_fini(adev);
 977		adev->vm_manager.enabled = false;
 978	}
 979	gmc_v8_0_gart_fini(adev);
 980	amdgpu_gem_force_release(adev);
 981	amdgpu_bo_fini(adev);
 982
 983	return 0;
 984}
 985
 986static int gmc_v8_0_hw_init(void *handle)
 987{
 988	int r;
 989	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 990
 991	gmc_v8_0_init_golden_registers(adev);
 992
 993	gmc_v8_0_mc_program(adev);
 994
 995	if (adev->asic_type == CHIP_TONGA) {
 996		r = gmc_v8_0_mc_load_microcode(adev);
 997		if (r) {
 998			DRM_ERROR("Failed to load MC firmware!\n");
 999			return r;
1000		}
1001	}
1002
1003	r = gmc_v8_0_gart_enable(adev);
1004	if (r)
1005		return r;
1006
1007	return r;
1008}
1009
1010static int gmc_v8_0_hw_fini(void *handle)
1011{
1012	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1013
1014	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
1015	gmc_v8_0_gart_disable(adev);
1016
1017	return 0;
1018}
1019
1020static int gmc_v8_0_suspend(void *handle)
1021{
1022	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1023
1024	if (adev->vm_manager.enabled) {
1025		gmc_v8_0_vm_fini(adev);
1026		adev->vm_manager.enabled = false;
1027	}
1028	gmc_v8_0_hw_fini(adev);
1029
1030	return 0;
1031}
1032
1033static int gmc_v8_0_resume(void *handle)
1034{
1035	int r;
1036	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1037
1038	r = gmc_v8_0_hw_init(adev);
1039	if (r)
1040		return r;
1041
1042	if (!adev->vm_manager.enabled) {
1043		r = gmc_v8_0_vm_init(adev);
1044		if (r) {
1045			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
1046			return r;
1047		}
1048		adev->vm_manager.enabled = true;
1049	}
1050
1051	return r;
1052}
1053
1054static bool gmc_v8_0_is_idle(void *handle)
1055{
1056	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1057	u32 tmp = RREG32(mmSRBM_STATUS);
1058
1059	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1060		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
1061		return false;
1062
1063	return true;
1064}
1065
1066static int gmc_v8_0_wait_for_idle(void *handle)
1067{
1068	unsigned i;
1069	u32 tmp;
1070	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1071
1072	for (i = 0; i < adev->usec_timeout; i++) {
1073		/* read MC_STATUS */
1074		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
1075					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1076					       SRBM_STATUS__MCC_BUSY_MASK |
1077					       SRBM_STATUS__MCD_BUSY_MASK |
1078					       SRBM_STATUS__VMC_BUSY_MASK |
1079					       SRBM_STATUS__VMC1_BUSY_MASK);
1080		if (!tmp)
1081			return 0;
1082		udelay(1);
1083	}
1084	return -ETIMEDOUT;
1085
1086}
1087
1088static void gmc_v8_0_print_status(void *handle)
1089{
1090	int i, j;
1091	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1092
1093	dev_info(adev->dev, "GMC 8.x registers\n");
1094	dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
1095		RREG32(mmSRBM_STATUS));
1096	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
1097		RREG32(mmSRBM_STATUS2));
1098
1099	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
1100		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
1101	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1102		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
1103	dev_info(adev->dev, "  MC_VM_MX_L1_TLB_CNTL=0x%08X\n",
1104		 RREG32(mmMC_VM_MX_L1_TLB_CNTL));
1105	dev_info(adev->dev, "  VM_L2_CNTL=0x%08X\n",
1106		 RREG32(mmVM_L2_CNTL));
1107	dev_info(adev->dev, "  VM_L2_CNTL2=0x%08X\n",
1108		 RREG32(mmVM_L2_CNTL2));
1109	dev_info(adev->dev, "  VM_L2_CNTL3=0x%08X\n",
1110		 RREG32(mmVM_L2_CNTL3));
1111	dev_info(adev->dev, "  VM_L2_CNTL4=0x%08X\n",
1112		 RREG32(mmVM_L2_CNTL4));
1113	dev_info(adev->dev, "  VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n",
1114		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR));
1115	dev_info(adev->dev, "  VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n",
1116		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR));
1117	dev_info(adev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
1118		 RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR));
1119	dev_info(adev->dev, "  VM_CONTEXT0_CNTL2=0x%08X\n",
1120		 RREG32(mmVM_CONTEXT0_CNTL2));
1121	dev_info(adev->dev, "  VM_CONTEXT0_CNTL=0x%08X\n",
1122		 RREG32(mmVM_CONTEXT0_CNTL));
1123	dev_info(adev->dev, "  VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR=0x%08X\n",
1124		 RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR));
1125	dev_info(adev->dev, "  VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR=0x%08X\n",
1126		 RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR));
1127	dev_info(adev->dev, "  mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET=0x%08X\n",
1128		 RREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET));
1129	dev_info(adev->dev, "  VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n",
1130		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR));
1131	dev_info(adev->dev, "  VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n",
1132		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR));
1133	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
1134		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR));
1135	dev_info(adev->dev, "  VM_CONTEXT1_CNTL2=0x%08X\n",
1136		 RREG32(mmVM_CONTEXT1_CNTL2));
1137	dev_info(adev->dev, "  VM_CONTEXT1_CNTL=0x%08X\n",
1138		 RREG32(mmVM_CONTEXT1_CNTL));
1139	for (i = 0; i < 16; i++) {
1140		if (i < 8)
1141			dev_info(adev->dev, "  VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
1142				 i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i));
1143		else
1144			dev_info(adev->dev, "  VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
1145				 i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8));
1146	}
1147	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n",
1148		 RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR));
1149	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n",
1150		 RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR));
1151	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n",
1152		 RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR));
1153	dev_info(adev->dev, "  MC_VM_FB_LOCATION=0x%08X\n",
1154		 RREG32(mmMC_VM_FB_LOCATION));
1155	dev_info(adev->dev, "  MC_VM_AGP_BASE=0x%08X\n",
1156		 RREG32(mmMC_VM_AGP_BASE));
1157	dev_info(adev->dev, "  MC_VM_AGP_TOP=0x%08X\n",
1158		 RREG32(mmMC_VM_AGP_TOP));
1159	dev_info(adev->dev, "  MC_VM_AGP_BOT=0x%08X\n",
1160		 RREG32(mmMC_VM_AGP_BOT));
1161
1162	dev_info(adev->dev, "  HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n",
1163		 RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL));
1164	dev_info(adev->dev, "  HDP_NONSURFACE_BASE=0x%08X\n",
1165		 RREG32(mmHDP_NONSURFACE_BASE));
1166	dev_info(adev->dev, "  HDP_NONSURFACE_INFO=0x%08X\n",
1167		 RREG32(mmHDP_NONSURFACE_INFO));
1168	dev_info(adev->dev, "  HDP_NONSURFACE_SIZE=0x%08X\n",
1169		 RREG32(mmHDP_NONSURFACE_SIZE));
1170	dev_info(adev->dev, "  HDP_MISC_CNTL=0x%08X\n",
1171		 RREG32(mmHDP_MISC_CNTL));
1172	dev_info(adev->dev, "  HDP_HOST_PATH_CNTL=0x%08X\n",
1173		 RREG32(mmHDP_HOST_PATH_CNTL));
1174
1175	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
1176		dev_info(adev->dev, "  %d:\n", i);
1177		dev_info(adev->dev, "  0x%04X=0x%08X\n",
1178			 0xb05 + j, RREG32(0xb05 + j));
1179		dev_info(adev->dev, "  0x%04X=0x%08X\n",
1180			 0xb06 + j, RREG32(0xb06 + j));
1181		dev_info(adev->dev, "  0x%04X=0x%08X\n",
1182			 0xb07 + j, RREG32(0xb07 + j));
1183		dev_info(adev->dev, "  0x%04X=0x%08X\n",
1184			 0xb08 + j, RREG32(0xb08 + j));
1185		dev_info(adev->dev, "  0x%04X=0x%08X\n",
1186			 0xb09 + j, RREG32(0xb09 + j));
1187	}
1188
1189	dev_info(adev->dev, "  BIF_FB_EN=0x%08X\n",
1190		 RREG32(mmBIF_FB_EN));
1191}
1192
1193static int gmc_v8_0_soft_reset(void *handle)
1194{
1195	struct amdgpu_mode_mc_save save;
1196	u32 srbm_soft_reset = 0;
1197	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1198	u32 tmp = RREG32(mmSRBM_STATUS);
1199
1200	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
1201		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1202						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
1203
1204	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1205		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
1206		if (!(adev->flags & AMD_IS_APU))
1207			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1208							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
1209	}
1210
1211	if (srbm_soft_reset) {
1212		gmc_v8_0_print_status((void *)adev);
1213
1214		gmc_v8_0_mc_stop(adev, &save);
1215		if (gmc_v8_0_wait_for_idle(adev)) {
 1216			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
1217		}
1218
1219
1220		tmp = RREG32(mmSRBM_SOFT_RESET);
1221		tmp |= srbm_soft_reset;
1222		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1223		WREG32(mmSRBM_SOFT_RESET, tmp);
1224		tmp = RREG32(mmSRBM_SOFT_RESET);
1225
1226		udelay(50);
1227
1228		tmp &= ~srbm_soft_reset;
1229		WREG32(mmSRBM_SOFT_RESET, tmp);
1230		tmp = RREG32(mmSRBM_SOFT_RESET);
1231
1232		/* Wait a little for things to settle down */
1233		udelay(50);
1234
1235		gmc_v8_0_mc_resume(adev, &save);
1236		udelay(50);
1237
1238		gmc_v8_0_print_status((void *)adev);
1239	}
1240
1241	return 0;
1242}
1243
1244static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
1245					     struct amdgpu_irq_src *src,
1246					     unsigned type,
1247					     enum amdgpu_interrupt_state state)
1248{
1249	u32 tmp;
1250	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1251		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1252		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1253		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1254		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1255		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1256		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
1257
1258	switch (state) {
1259	case AMDGPU_IRQ_STATE_DISABLE:
1260		/* system context */
1261		tmp = RREG32(mmVM_CONTEXT0_CNTL);
1262		tmp &= ~bits;
1263		WREG32(mmVM_CONTEXT0_CNTL, tmp);
1264		/* VMs */
1265		tmp = RREG32(mmVM_CONTEXT1_CNTL);
1266		tmp &= ~bits;
1267		WREG32(mmVM_CONTEXT1_CNTL, tmp);
1268		break;
1269	case AMDGPU_IRQ_STATE_ENABLE:
1270		/* system context */
1271		tmp = RREG32(mmVM_CONTEXT0_CNTL);
1272		tmp |= bits;
1273		WREG32(mmVM_CONTEXT0_CNTL, tmp);
1274		/* VMs */
1275		tmp = RREG32(mmVM_CONTEXT1_CNTL);
1276		tmp |= bits;
1277		WREG32(mmVM_CONTEXT1_CNTL, tmp);
1278		break;
1279	default:
1280		break;
1281	}
1282
1283	return 0;
1284}
1285
1286static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
1287				      struct amdgpu_irq_src *source,
1288				      struct amdgpu_iv_entry *entry)
1289{
1290	u32 addr, status, mc_client;
1291
1292	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
1293	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
1294	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
1295	/* reset addr and status */
1296	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
1297
1298	if (!addr && !status)
1299		return 0;
1300
1301	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
1302		gmc_v8_0_set_fault_enable_default(adev, false);
1303
1304	dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
1305		entry->src_id, entry->src_data);
1306	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
1307		addr);
1308	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1309		status);
1310	gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
1311
1312	return 0;
1313}
1314
1315static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
1316		bool enable)
1317{
1318	uint32_t data;
1319
1320	if (enable) {
1321		data = RREG32(mmMC_HUB_MISC_HUB_CG);
1322		data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
1323		WREG32(mmMC_HUB_MISC_HUB_CG, data);
1324
1325		data = RREG32(mmMC_HUB_MISC_SIP_CG);
1326		data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
1327		WREG32(mmMC_HUB_MISC_SIP_CG, data);
1328
1329		data = RREG32(mmMC_HUB_MISC_VM_CG);
1330		data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
1331		WREG32(mmMC_HUB_MISC_VM_CG, data);
1332
1333		data = RREG32(mmMC_XPB_CLK_GAT);
1334		data |= MC_XPB_CLK_GAT__ENABLE_MASK;
1335		WREG32(mmMC_XPB_CLK_GAT, data);
1336
1337		data = RREG32(mmATC_MISC_CG);
1338		data |= ATC_MISC_CG__ENABLE_MASK;
1339		WREG32(mmATC_MISC_CG, data);
1340
1341		data = RREG32(mmMC_CITF_MISC_WR_CG);
1342		data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
1343		WREG32(mmMC_CITF_MISC_WR_CG, data);
1344
1345		data = RREG32(mmMC_CITF_MISC_RD_CG);
1346		data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
1347		WREG32(mmMC_CITF_MISC_RD_CG, data);
1348
1349		data = RREG32(mmMC_CITF_MISC_VM_CG);
1350		data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
1351		WREG32(mmMC_CITF_MISC_VM_CG, data);
1352
1353		data = RREG32(mmVM_L2_CG);
1354		data |= VM_L2_CG__ENABLE_MASK;
1355		WREG32(mmVM_L2_CG, data);
1356	} else {
1357		data = RREG32(mmMC_HUB_MISC_HUB_CG);
1358		data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
1359		WREG32(mmMC_HUB_MISC_HUB_CG, data);
1360
1361		data = RREG32(mmMC_HUB_MISC_SIP_CG);
1362		data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
1363		WREG32(mmMC_HUB_MISC_SIP_CG, data);
1364
1365		data = RREG32(mmMC_HUB_MISC_VM_CG);
1366		data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
1367		WREG32(mmMC_HUB_MISC_VM_CG, data);
1368
1369		data = RREG32(mmMC_XPB_CLK_GAT);
1370		data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
1371		WREG32(mmMC_XPB_CLK_GAT, data);
1372
1373		data = RREG32(mmATC_MISC_CG);
1374		data &= ~ATC_MISC_CG__ENABLE_MASK;
1375		WREG32(mmATC_MISC_CG, data);
1376
1377		data = RREG32(mmMC_CITF_MISC_WR_CG);
1378		data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
1379		WREG32(mmMC_CITF_MISC_WR_CG, data);
1380
1381		data = RREG32(mmMC_CITF_MISC_RD_CG);
1382		data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
1383		WREG32(mmMC_CITF_MISC_RD_CG, data);
1384
1385		data = RREG32(mmMC_CITF_MISC_VM_CG);
1386		data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
1387		WREG32(mmMC_CITF_MISC_VM_CG, data);
1388
1389		data = RREG32(mmVM_L2_CG);
1390		data &= ~VM_L2_CG__ENABLE_MASK;
1391		WREG32(mmVM_L2_CG, data);
1392	}
1393}
1394
1395static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
1396		bool enable)
1397{
1398	uint32_t data;
1399
1400	if (enable) {
1401		data = RREG32(mmMC_HUB_MISC_HUB_CG);
1402		data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
1403		WREG32(mmMC_HUB_MISC_HUB_CG, data);
1404
1405		data = RREG32(mmMC_HUB_MISC_SIP_CG);
1406		data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
1407		WREG32(mmMC_HUB_MISC_SIP_CG, data);
1408
1409		data = RREG32(mmMC_HUB_MISC_VM_CG);
1410		data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1411		WREG32(mmMC_HUB_MISC_VM_CG, data);
1412
1413		data = RREG32(mmMC_XPB_CLK_GAT);
1414		data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
1415		WREG32(mmMC_XPB_CLK_GAT, data);
1416
1417		data = RREG32(mmATC_MISC_CG);
1418		data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
1419		WREG32(mmATC_MISC_CG, data);
1420
1421		data = RREG32(mmMC_CITF_MISC_WR_CG);
1422		data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
1423		WREG32(mmMC_CITF_MISC_WR_CG, data);
1424
1425		data = RREG32(mmMC_CITF_MISC_RD_CG);
1426		data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
1427		WREG32(mmMC_CITF_MISC_RD_CG, data);
1428
1429		data = RREG32(mmMC_CITF_MISC_VM_CG);
1430		data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1431		WREG32(mmMC_CITF_MISC_VM_CG, data);
1432
1433		data = RREG32(mmVM_L2_CG);
1434		data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
1435		WREG32(mmVM_L2_CG, data);
1436	} else {
1437		data = RREG32(mmMC_HUB_MISC_HUB_CG);
1438		data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
1439		WREG32(mmMC_HUB_MISC_HUB_CG, data);
1440
1441		data = RREG32(mmMC_HUB_MISC_SIP_CG);
1442		data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
1443		WREG32(mmMC_HUB_MISC_SIP_CG, data);
1444
1445		data = RREG32(mmMC_HUB_MISC_VM_CG);
1446		data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1447		WREG32(mmMC_HUB_MISC_VM_CG, data);
1448
1449		data = RREG32(mmMC_XPB_CLK_GAT);
1450		data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
1451		WREG32(mmMC_XPB_CLK_GAT, data);
1452
1453		data = RREG32(mmATC_MISC_CG);
1454		data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
1455		WREG32(mmATC_MISC_CG, data);
1456
1457		data = RREG32(mmMC_CITF_MISC_WR_CG);
1458		data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
1459		WREG32(mmMC_CITF_MISC_WR_CG, data);
1460
1461		data = RREG32(mmMC_CITF_MISC_RD_CG);
1462		data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
1463		WREG32(mmMC_CITF_MISC_RD_CG, data);
1464
1465		data = RREG32(mmMC_CITF_MISC_VM_CG);
1466		data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1467		WREG32(mmMC_CITF_MISC_VM_CG, data);
1468
1469		data = RREG32(mmVM_L2_CG);
1470		data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
1471		WREG32(mmVM_L2_CG, data);
1472	}
1473}
1474
1475static int gmc_v8_0_set_clockgating_state(void *handle,
1476					  enum amd_clockgating_state state)
1477{
1478	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1479
1480	switch (adev->asic_type) {
1481	case CHIP_FIJI:
 1482		fiji_update_mc_medium_grain_clock_gating(adev,
 1483				state == AMD_CG_STATE_GATE);
 1484		fiji_update_mc_light_sleep(adev,
 1485				state == AMD_CG_STATE_GATE);
1486		break;
1487	default:
1488		break;
1489	}
1490	return 0;
1491}
1492
1493static int gmc_v8_0_set_powergating_state(void *handle,
1494					  enum amd_powergating_state state)
1495{
1496	return 0;
1497}
1498
1499const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
1500	.early_init = gmc_v8_0_early_init,
1501	.late_init = gmc_v8_0_late_init,
1502	.sw_init = gmc_v8_0_sw_init,
1503	.sw_fini = gmc_v8_0_sw_fini,
1504	.hw_init = gmc_v8_0_hw_init,
1505	.hw_fini = gmc_v8_0_hw_fini,
1506	.suspend = gmc_v8_0_suspend,
1507	.resume = gmc_v8_0_resume,
1508	.is_idle = gmc_v8_0_is_idle,
1509	.wait_for_idle = gmc_v8_0_wait_for_idle,
1510	.soft_reset = gmc_v8_0_soft_reset,
1511	.print_status = gmc_v8_0_print_status,
1512	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
1513	.set_powergating_state = gmc_v8_0_set_powergating_state,
1514};
1515
1516static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
1517	.flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
1518	.set_pte_pde = gmc_v8_0_gart_set_pte_pde,
1519};
1520
1521static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
1522	.set = gmc_v8_0_vm_fault_interrupt_state,
1523	.process = gmc_v8_0_process_interrupt,
1524};
1525
1526static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
1527{
1528	if (adev->gart.gart_funcs == NULL)
1529		adev->gart.gart_funcs = &gmc_v8_0_gart_funcs;
1530}
1531
1532static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
1533{
1534	adev->mc.vm_fault.num_types = 1;
1535	adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
1536}
v6.2 (drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c)
   1/*
   2 * Copyright 2014 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23
  24#include <linux/firmware.h>
  25#include <linux/module.h>
  26#include <linux/pci.h>
  27
  28#include <drm/drm_cache.h>
  29#include "amdgpu.h"
  30#include "gmc_v8_0.h"
  31#include "amdgpu_ucode.h"
  32#include "amdgpu_amdkfd.h"
  33#include "amdgpu_gem.h"
  34
  35#include "gmc/gmc_8_1_d.h"
  36#include "gmc/gmc_8_1_sh_mask.h"
  37
  38#include "bif/bif_5_0_d.h"
  39#include "bif/bif_5_0_sh_mask.h"
  40
  41#include "oss/oss_3_0_d.h"
  42#include "oss/oss_3_0_sh_mask.h"
  43
  44#include "dce/dce_10_0_d.h"
  45#include "dce/dce_10_0_sh_mask.h"
  46
  47#include "vid.h"
  48#include "vi.h"
  49
  50#include "amdgpu_atombios.h"
  51
  52#include "ivsrcid/ivsrcid_vislands30.h"
  53
  54static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
  55static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
  56static int gmc_v8_0_wait_for_idle(void *handle);
  57
  58MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
  59MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
  60MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
  61MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
  62MODULE_FIRMWARE("amdgpu/polaris12_32_mc.bin");
  63MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
  64MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
  65MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
  66
  67static const u32 golden_settings_tonga_a11[] =
  68{
  69	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
  70	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
  71	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
  72	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  73	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  74	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  75	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  76};
  77
  78static const u32 tonga_mgcg_cgcg_init[] =
  79{
  80	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
  81};
  82
  83static const u32 golden_settings_fiji_a10[] =
  84{
  85	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  86	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  87	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  88	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  89};
  90
  91static const u32 fiji_mgcg_cgcg_init[] =
  92{
  93	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
  94};
  95
  96static const u32 golden_settings_polaris11_a11[] =
  97{
  98	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  99	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
 100	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
 101	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
 102};
 103
 104static const u32 golden_settings_polaris10_a11[] =
 105{
 106	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
 107	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
 108	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
 109	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
 110	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
 111};
 112
 113static const u32 cz_mgcg_cgcg_init[] =
 114{
 115	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
 116};
 117
 118static const u32 stoney_mgcg_cgcg_init[] =
 119{
 120	mmATC_MISC_CG, 0xffffffff, 0x000c0200,
 121	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
 122};
 123
 124static const u32 golden_settings_stoney_common[] =
 125{
 126	mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
 127	mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
 128};
 129
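/*
 * Illustrative sketch, not part of the driver: how the (reg, and_mask,
 * or_mask) triplets in the tables above are consumed.  The real helper is
 * amdgpu_device_program_register_sequence(); this simplified copy (the
 * function name is made up for illustration) only demonstrates the
 * masking scheme.
 */
static void __maybe_unused
gmc_v8_0_example_program_triplets(struct amdgpu_device *adev,
				  const u32 *regs, u32 array_size)
{
	u32 i, tmp;

	for (i = 0; i + 2 < array_size; i += 3) {
		u32 reg = regs[i], and_mask = regs[i + 1], or_mask = regs[i + 2];

		if (and_mask == 0xffffffff) {
			/* full mask: replace the register contents */
			tmp = or_mask;
		} else {
			/* partial mask: read-modify-write the masked bits */
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask & and_mask;
		}
		WREG32(reg, tmp);
	}
}
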
 130static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
 131{
 132	switch (adev->asic_type) {
 133	case CHIP_FIJI:
 134		amdgpu_device_program_register_sequence(adev,
 135							fiji_mgcg_cgcg_init,
 136							ARRAY_SIZE(fiji_mgcg_cgcg_init));
 137		amdgpu_device_program_register_sequence(adev,
 138							golden_settings_fiji_a10,
 139							ARRAY_SIZE(golden_settings_fiji_a10));
 140		break;
 141	case CHIP_TONGA:
 142		amdgpu_device_program_register_sequence(adev,
 143							tonga_mgcg_cgcg_init,
 144							ARRAY_SIZE(tonga_mgcg_cgcg_init));
 145		amdgpu_device_program_register_sequence(adev,
 146							golden_settings_tonga_a11,
 147							ARRAY_SIZE(golden_settings_tonga_a11));
 148		break;
 149	case CHIP_POLARIS11:
 150	case CHIP_POLARIS12:
 151	case CHIP_VEGAM:
 152		amdgpu_device_program_register_sequence(adev,
 153							golden_settings_polaris11_a11,
 154							ARRAY_SIZE(golden_settings_polaris11_a11));
 155		break;
 156	case CHIP_POLARIS10:
 157		amdgpu_device_program_register_sequence(adev,
 158							golden_settings_polaris10_a11,
 159							ARRAY_SIZE(golden_settings_polaris10_a11));
 160		break;
 161	case CHIP_CARRIZO:
 162		amdgpu_device_program_register_sequence(adev,
 163							cz_mgcg_cgcg_init,
 164							ARRAY_SIZE(cz_mgcg_cgcg_init));
 165		break;
 166	case CHIP_STONEY:
 167		amdgpu_device_program_register_sequence(adev,
 168							stoney_mgcg_cgcg_init,
 169							ARRAY_SIZE(stoney_mgcg_cgcg_init));
 170		amdgpu_device_program_register_sequence(adev,
 171							golden_settings_stoney_common,
 172							ARRAY_SIZE(golden_settings_stoney_common));
 173		break;
 174	default:
 175		break;
 176	}
 177}
 178
 179static void gmc_v8_0_mc_stop(struct amdgpu_device *adev)
 180{
 181	u32 blackout;
 182
 183	gmc_v8_0_wait_for_idle(adev);
 184
 185	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
 186	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
 187		/* Block CPU access */
 188		WREG32(mmBIF_FB_EN, 0);
 189		/* blackout the MC */
 190		blackout = REG_SET_FIELD(blackout,
 191					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
 192		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
 193	}
 194	/* wait for the MC to settle */
 195	udelay(100);
 196}
 197
 198static void gmc_v8_0_mc_resume(struct amdgpu_device *adev)
 199{
 200	u32 tmp;
 201
 202	/* unblackout the MC */
 203	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
 204	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
 205	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
 206	/* allow CPU access */
 207	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
 208	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
 209	WREG32(mmBIF_FB_EN, tmp);
 210}
 211
 212/**
 213 * gmc_v8_0_init_microcode - load ucode images from disk
 214 *
 215 * @adev: amdgpu_device pointer
 216 *
 217 * Use the firmware interface to load the ucode images into
 218 * the driver (not loaded into hw).
 219 * Returns 0 on success, error on failure.
 220 */
 221static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
 222{
 223	const char *chip_name;
 224	char fw_name[30];
 225	int err;
 226
 227	DRM_DEBUG("\n");
 228
 229	switch (adev->asic_type) {
 230	case CHIP_TONGA:
 231		chip_name = "tonga";
 232		break;
 233	case CHIP_POLARIS11:
 234		if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) ||
 235		    ASICID_IS_P31(adev->pdev->device, adev->pdev->revision))
 236			chip_name = "polaris11_k";
 237		else
 238			chip_name = "polaris11";
 239		break;
 240	case CHIP_POLARIS10:
 241		if (ASICID_IS_P30(adev->pdev->device, adev->pdev->revision))
 242			chip_name = "polaris10_k";
 243		else
 244			chip_name = "polaris10";
 245		break;
 246	case CHIP_POLARIS12:
 247		if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) {
 248			chip_name = "polaris12_k";
 249		} else {
 250			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, ixMC_IO_DEBUG_UP_159);
 251			/* Polaris12 32-bit ASIC needs a special MC firmware */
 252			if (RREG32(mmMC_SEQ_IO_DEBUG_DATA) == 0x05b4dc40)
 253				chip_name = "polaris12_32";
 254			else
 255				chip_name = "polaris12";
 256		}
 257		break;
 258	case CHIP_FIJI:
 259	case CHIP_CARRIZO:
 260	case CHIP_STONEY:
 261	case CHIP_VEGAM:
 262		return 0;
 263	default: BUG();
 264	}
 265
 266	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
 267	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
 268	if (err)
 269		goto out;
 270	err = amdgpu_ucode_validate(adev->gmc.fw);
 271
 272out:
 273	if (err) {
 274		pr_err("mc: Failed to load firmware \"%s\"\n", fw_name);
 275		release_firmware(adev->gmc.fw);
 276		adev->gmc.fw = NULL;
 277	}
 278	return err;
 279}
 280
 281/**
 282 * gmc_v8_0_tonga_mc_load_microcode - load tonga MC ucode into the hw
 283 *
 284 * @adev: amdgpu_device pointer
 285 *
 286 * Load the GDDR MC ucode into the hw (VI).
 287 * Returns 0 on success, error on failure.
 288 */
 289static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
 290{
 291	const struct mc_firmware_header_v1_0 *hdr;
 292	const __le32 *fw_data = NULL;
 293	const __le32 *io_mc_regs = NULL;
 294	u32 running;
 295	int i, ucode_size, regs_size;
 296
 297	/* Skip MC ucode loading on SR-IOV capable boards;
 298	 * the vbios does this for us in asic_init in that case.
 299	 * Likewise skip it on a VF, where the hypervisor loads
 300	 * the ucode for this adapter.
 301	 */
 302	if (amdgpu_sriov_bios(adev))
 303		return 0;
 304
 305	if (!adev->gmc.fw)
 306		return -EINVAL;
 307
 308	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
 309	amdgpu_ucode_print_mc_hdr(&hdr->header);
 310
 311	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
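	/* each io-debug entry is an (index, data) pair of 32-bit words,
	 * hence the byte size is divided by 4 * 2 below
	 */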
 312	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
 313	io_mc_regs = (const __le32 *)
 314		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
 315	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 316	fw_data = (const __le32 *)
 317		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 318
 319	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
 320
 321	if (running == 0) {
 322		/* reset the engine and set to writable */
 323		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
 324		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
 325
 326		/* load mc io regs */
 327		for (i = 0; i < regs_size; i++) {
 328			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
 329			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
 330		}
 331		/* load the MC ucode */
 332		for (i = 0; i < ucode_size; i++)
 333			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));
 334
 335		/* put the engine back into the active state */
 336		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
 337		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
 338		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
 339
 340		/* wait for training to complete */
 341		for (i = 0; i < adev->usec_timeout; i++) {
 342			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
 343					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
 344				break;
 345			udelay(1);
 346		}
 347		for (i = 0; i < adev->usec_timeout; i++) {
 348			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
 349					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
 350				break;
 351			udelay(1);
 352		}
 353	}
 354
 355	return 0;
 356}
 357
 358static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
 359{
 360	const struct mc_firmware_header_v1_0 *hdr;
 361	const __le32 *fw_data = NULL;
 362	const __le32 *io_mc_regs = NULL;
 363	u32 data;
 364	int i, ucode_size, regs_size;
 365
 366	/* Skip MC ucode loading on SR-IOV capable boards;
 367	 * the vbios does this for us in asic_init in that case.
 368	 * Likewise skip it on a VF, where the hypervisor loads
 369	 * the ucode for this adapter.
 370	 */
 371	if (amdgpu_sriov_bios(adev))
 372		return 0;
 373
 374	if (!adev->gmc.fw)
 375		return -EINVAL;
 376
 377	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
 378	amdgpu_ucode_print_mc_hdr(&hdr->header);
 379
 380	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
 381	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
 382	io_mc_regs = (const __le32 *)
 383		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
 384	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 385	fw_data = (const __le32 *)
 386		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 387
 388	data = RREG32(mmMC_SEQ_MISC0);
 389	data &= ~(0x40);
 390	WREG32(mmMC_SEQ_MISC0, data);
 391
 392	/* load mc io regs */
 393	for (i = 0; i < regs_size; i++) {
 394		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
 395		WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
 396	}
 397
 398	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
 399	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
 400
 401	/* load the MC ucode */
 402	for (i = 0; i < ucode_size; i++)
 403		WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));
 404
 405	/* put the engine back into the active state */
 406	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
 407	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
 408	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
 409
 410	/* wait for training to complete */
 411	for (i = 0; i < adev->usec_timeout; i++) {
 412		data = RREG32(mmMC_SEQ_MISC0);
 413		if (data & 0x80)
 414			break;
 415		udelay(1);
 416	}
 417
 418	return 0;
 419}
 420
 421static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
 422				       struct amdgpu_gmc *mc)
 423{
 424	u64 base = 0;
 425
 426	if (!amdgpu_sriov_vf(adev))
 427		base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
 428	base <<= 24;
 429
 430	amdgpu_gmc_vram_location(adev, mc, base);
 431	amdgpu_gmc_gart_location(adev, mc);
 432}
 433
 434/**
 435 * gmc_v8_0_mc_program - program the GPU memory controller
 436 *
 437 * @adev: amdgpu_device pointer
 438 *
 439 * Set the location of vram, gart, and AGP in the GPU's
 440 * physical address space (VI).
 441 */
 442static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
 443{
 444	u32 tmp;
 445	int i, j;
 446
 447	/* Initialize HDP */
 448	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
 449		WREG32((0xb05 + j), 0x00000000);
 450		WREG32((0xb06 + j), 0x00000000);
 451		WREG32((0xb07 + j), 0x00000000);
 452		WREG32((0xb08 + j), 0x00000000);
 453		WREG32((0xb09 + j), 0x00000000);
 454	}
 455	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
 456
 457	if (gmc_v8_0_wait_for_idle((void *)adev)) {
 458		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
 459	}
 460	if (adev->mode_info.num_crtc) {
 461		/* Lockout access through VGA aperture*/
 462		tmp = RREG32(mmVGA_HDP_CONTROL);
 463		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
 464		WREG32(mmVGA_HDP_CONTROL, tmp);
 465
 466		/* disable VGA render */
 467		tmp = RREG32(mmVGA_RENDER_CONTROL);
 468		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
 469		WREG32(mmVGA_RENDER_CONTROL, tmp);
 470	}
 471	/* Update configuration */
 472	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
 473	       adev->gmc.vram_start >> 12);
 474	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
 475	       adev->gmc.vram_end >> 12);
 476	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
 477	       adev->vram_scratch.gpu_addr >> 12);
 478
 479	if (amdgpu_sriov_vf(adev)) {
 480		tmp = ((adev->gmc.vram_end >> 24) & 0xFFFF) << 16;
 481		tmp |= ((adev->gmc.vram_start >> 24) & 0xFFFF);
 482		WREG32(mmMC_VM_FB_LOCATION, tmp);
 483		/* XXX double check these! */
 484		WREG32(mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
 485		WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
 486		WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
 487	}
 488
 489	WREG32(mmMC_VM_AGP_BASE, 0);
 490	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
 491	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
 492	if (gmc_v8_0_wait_for_idle((void *)adev)) {
 493		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
 494	}
 495
 496	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
 497
 498	tmp = RREG32(mmHDP_MISC_CNTL);
 499	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
 500	WREG32(mmHDP_MISC_CNTL, tmp);
 501
 502	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
 503	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
 504}
 505
 506/**
 507 * gmc_v8_0_mc_init - initialize the memory controller driver params
 508 *
 509 * @adev: amdgpu_device pointer
 510 *
 511 * Look up the amount of vram, vram width, and decide how to place
 512 * vram and gart within the GPU's physical address space (VI).
 513 * Returns 0 for success.
 514 */
 515static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
 516{
 517	int r;
 518	u32 tmp;
 519
 520	adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
 521	if (!adev->gmc.vram_width) {
 522		int chansize, numchan;
 523
 524		/* Get VRAM information */
 525		tmp = RREG32(mmMC_ARB_RAMCFG);
 526		if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
 527			chansize = 64;
 528		} else {
 529			chansize = 32;
 530		}
 531		tmp = RREG32(mmMC_SHARED_CHMAP);
 532		switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
 533		case 0:
 534		default:
 535			numchan = 1;
 536			break;
 537		case 1:
 538			numchan = 2;
 539			break;
 540		case 2:
 541			numchan = 4;
 542			break;
 543		case 3:
 544			numchan = 8;
 545			break;
 546		case 4:
 547			numchan = 3;
 548			break;
 549		case 5:
 550			numchan = 6;
 551			break;
 552		case 6:
 553			numchan = 10;
 554			break;
 555		case 7:
 556			numchan = 12;
 557			break;
 558		case 8:
 559			numchan = 16;
 560			break;
 561		}
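		/* e.g. NOOFCHAN == 3 selects 8 channels; with 64-bit
		 * channels that gives an 8 * 64 = 512-bit bus
		 */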
 562		adev->gmc.vram_width = numchan * chansize;
 563	}
 564	/* size in MB on vi */
 565	tmp = RREG32(mmCONFIG_MEMSIZE);
 566	/* some boards may have garbage in the upper 16 bits */
 567	if (tmp & 0xffff0000) {
 568		DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
 569		if (tmp & 0xffff)
 570			tmp &= 0xffff;
 571	}
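	/* e.g. CONFIG_MEMSIZE == 0x2000 means 8192 MB, i.e. 8 GB of VRAM */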
 572	adev->gmc.mc_vram_size = tmp * 1024ULL * 1024ULL;
 573	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
 574
 575	if (!(adev->flags & AMD_IS_APU)) {
 576		r = amdgpu_device_resize_fb_bar(adev);
 577		if (r)
 578			return r;
 579	}
 580	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
 581	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 582
 583#ifdef CONFIG_X86_64
 584	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
 585		adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
 586		adev->gmc.aper_size = adev->gmc.real_vram_size;
 587	}
 588#endif
 589
 590	/* In case the PCI BAR is larger than the actual amount of vram */
 591	adev->gmc.visible_vram_size = adev->gmc.aper_size;
 592	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
 593		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
 594
 595	/* set the gart size */
 596	if (amdgpu_gart_size == -1) {
 597		switch (adev->asic_type) {
 598		case CHIP_POLARIS10: /* all engines support GPUVM */
 599		case CHIP_POLARIS11: /* all engines support GPUVM */
 600		case CHIP_POLARIS12: /* all engines support GPUVM */
 601		case CHIP_VEGAM:     /* all engines support GPUVM */
 602		default:
 603			adev->gmc.gart_size = 256ULL << 20;
 604			break;
 605		case CHIP_TONGA:   /* UVD, VCE do not support GPUVM */
 606		case CHIP_FIJI:    /* UVD, VCE do not support GPUVM */
 607		case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */
 608		case CHIP_STONEY:  /* UVD does not support GPUVM, DCE SG support */
 609			adev->gmc.gart_size = 1024ULL << 20;
 610			break;
 611		}
 612	} else {
 613		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
 614	}
 615
 616	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
 617	gmc_v8_0_vram_gtt_location(adev, &adev->gmc);
 618
 619	return 0;
 620}
 621
 622/**
 623 * gmc_v8_0_flush_gpu_tlb_pasid - tlb flush via pasid
 624 *
 625 * @adev: amdgpu_device pointer
 626 * @pasid: pasid to be flushed
 627 * @flush_type: type of flush
 628 * @all_hub: flush all hubs
 629 *
 630 * Flush the TLB for the requested pasid.
 631 */
 632static int gmc_v8_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 633					uint16_t pasid, uint32_t flush_type,
 634					bool all_hub)
 635{
 636	int vmid;
 637	unsigned int tmp;
 638
 639	if (amdgpu_in_reset(adev))
 640		return -EIO;
 641
 642	for (vmid = 1; vmid < 16; vmid++) {
 643
 644		tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
 645		if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
 646			(tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
 647			WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
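			/* the read back likely acts as a posting read,
			 * pushing the invalidate request to the hardware
			 * before we return
			 */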
 648			RREG32(mmVM_INVALIDATE_RESPONSE);
 649			break;
 650		}
 651	}
 652
 653	return 0;
 654
 655}
 656
 657/*
 658 * GART
 659 * VMID 0 is the physical GPU addresses as used by the kernel.
 660 * VMIDs 1-15 are used for userspace clients and are handled
 661 * by the amdgpu vm/hsa code.
 662 */
 663
 664/**
 665 * gmc_v8_0_flush_gpu_tlb - gart tlb flush callback
 666 *
 667 * @adev: amdgpu_device pointer
 668 * @vmid: vm instance to flush
 669 * @vmhub: which hub to flush
 670 * @flush_type: type of flush
 671 *
 672 * Flush the TLB for the requested page table (VI).
 673 */
 674static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 675					uint32_t vmhub, uint32_t flush_type)
 676{
 677	/* bits 0-15 are the VM contexts0-15 */
 678	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 679}
 680
 681static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 682					    unsigned vmid, uint64_t pd_addr)
 683{
 684	uint32_t reg;
 685
 686	if (vmid < 8)
 687		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
 688	else
 689		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
 690	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
 691
 692	/* bits 0-15 are the VM contexts0-15 */
 693	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
 694
 695	return pd_addr;
 696}
 697
 698static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
 699					unsigned pasid)
 700{
 701	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
 702}
 703
 704/*
 705 * PTE format on VI:
 706 * 63:40 reserved
 707 * 39:12 4k physical page base address
 708 * 11:7 fragment
 709 * 6 write
 710 * 5 read
 711 * 4 exe
 712 * 3 reserved
 713 * 2 snooped
 714 * 1 system
 715 * 0 valid
 716 *
 717 * PDE format on VI:
 718 * 63:59 block fragment size
 719 * 58:40 reserved
 720 * 39:1 physical base address of PTE
 721 * bits 5:1 must be 0.
 722 * 0 valid
 723 */
 724
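/*
 * Illustrative sketch, not driver code: packing a VI PTE from the field
 * layout documented above.  The helper name and parameters are made up
 * for illustration; the driver composes real PTEs from the AMDGPU_PTE_*
 * flags instead.
 */
static inline u64 __maybe_unused
gmc_v8_0_example_make_pte(u64 page_base, unsigned int fragment,
			  bool write, bool read, bool exec,
			  bool snooped, bool system)
{
	u64 pte = 0;

	pte |= page_base & 0x000000FFFFFFF000ULL; /* 39:12 4k page base */
	pte |= ((u64)fragment & 0x1f) << 7;	  /* 11:7  fragment     */
	pte |= (u64)write << 6;			  /*     6 write        */
	pte |= (u64)read << 5;			  /*     5 read         */
	pte |= (u64)exec << 4;			  /*     4 exe          */
	pte |= (u64)snooped << 2;		  /*     2 snooped      */
	pte |= (u64)system << 1;		  /*     1 system       */
	pte |= 1ULL;				  /*     0 valid        */

	return pte;
}
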
 725static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
 726				uint64_t *addr, uint64_t *flags)
 727{
 728	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
 729}
 730
 731static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev,
 732				struct amdgpu_bo_va_mapping *mapping,
 733				uint64_t *flags)
 734{
 735	*flags &= ~AMDGPU_PTE_EXECUTABLE;
 736	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
 737	*flags &= ~AMDGPU_PTE_PRT;
 738}
 739
 740/**
 741 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 742 *
 743 * @adev: amdgpu_device pointer
 744 * @value: true redirects VM faults to the default page
 745 */
 746static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
 747					      bool value)
 748{
 749	u32 tmp;
 750
 751	tmp = RREG32(mmVM_CONTEXT1_CNTL);
 752	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 753			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 754	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 755			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 756	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 757			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 758	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 759			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 760	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 761			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 762	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 763			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 764	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 765			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 766	WREG32(mmVM_CONTEXT1_CNTL, tmp);
 767}
 768
 769/**
 770 * gmc_v8_0_set_prt - set PRT VM fault
 771 *
 772 * @adev: amdgpu_device pointer
 773 * @enable: enable/disable VM fault handling for PRT
 774 */
 775static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
 776{
 777	u32 tmp;
 778
 779	if (enable && !adev->gmc.prt_warning) {
 780		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
 781		adev->gmc.prt_warning = true;
 782	}
 783
 784	tmp = RREG32(mmVM_PRT_CNTL);
 785	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 786			    CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
 787	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 788			    CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
 789	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 790			    TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
 791	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 792			    TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
 793	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 794			    L2_CACHE_STORE_INVALID_ENTRIES, enable);
 795	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 796			    L1_TLB_STORE_INVALID_ENTRIES, enable);
 797	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 798			    MASK_PDE0_FAULT, enable);
 799	WREG32(mmVM_PRT_CNTL, tmp);
 800
 801	if (enable) {
 802		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
 803		uint32_t high = adev->vm_manager.max_pfn -
 804			(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
 805
 806		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
 807		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
 808		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
 809		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
 810		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
 811		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
 812		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
 813		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
 814	} else {
 815		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
 816		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
 817		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
 818		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
 819		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
 820		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
 821		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
 822		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
 823	}
 824}
 825
 826/**
 827 * gmc_v8_0_gart_enable - gart enable
 828 *
 829 * @adev: amdgpu_device pointer
 830 *
 831 * This sets up the TLBs, programs the page tables for VMID0,
 832 * sets up the hw for VMIDs 1-15 which are allocated on
 833 * demand, and sets up the global locations for the LDS, GDS,
 834 * and GPUVM for FSA64 clients (VI).
 835 * Returns 0 for success, errors for failure.
 836 */
 837static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 838{
 839	uint64_t table_addr;
 840	u32 tmp, field;
 841	int i;
 842
 843	if (adev->gart.bo == NULL) {
 844		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
 845		return -EINVAL;
 846	}
 847	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
 848	table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
 849
 850	/* Setup TLB control */
 851	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
 852	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
 853	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
 854	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
 855	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
 856	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
 857	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
 858	/* Setup L2 cache */
 859	tmp = RREG32(mmVM_L2_CNTL);
 860	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
 861	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
 862	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
 863	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
 864	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
 865	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
 866	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
 867	WREG32(mmVM_L2_CNTL, tmp);
 868	tmp = RREG32(mmVM_L2_CNTL2);
 869	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
 870	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
 871	WREG32(mmVM_L2_CNTL2, tmp);
 872
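	/* fragment_size is log2 of the pages per fragment; the default of 9
	 * requested in sw_init gives 512 * 4KB = 2MB fragments
	 */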
 873	field = adev->vm_manager.fragment_size;
 874	tmp = RREG32(mmVM_L2_CNTL3);
 875	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
 876	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
 877	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
 878	WREG32(mmVM_L2_CNTL3, tmp);
 879	/* XXX: set to enable PTE/PDE in system memory */
 880	tmp = RREG32(mmVM_L2_CNTL4);
 881	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
 882	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
 883	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
 884	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
 885	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
 886	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
 887	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
 888	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
 889	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
 890	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
 891	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
 892	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
 893	WREG32(mmVM_L2_CNTL4, tmp);
 894	/* setup context0 */
 895	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
 896	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
 897	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
 898	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 899			(u32)(adev->dummy_page_addr >> 12));
 900	WREG32(mmVM_CONTEXT0_CNTL2, 0);
 901	tmp = RREG32(mmVM_CONTEXT0_CNTL);
 902	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
 903	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
 904	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 905	WREG32(mmVM_CONTEXT0_CNTL, tmp);
 906
 907	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
 908	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
 909	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);
 910
 911	/* empty context1-15 */
 912	/* FIXME start with 4G, once using 2 level pt switch to full
 913	 * vm size space
 914	 */
 915	/* set vm size, must be a multiple of 4 */
 916	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
 917	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
 918	for (i = 1; i < AMDGPU_NUM_VMID; i++) {
 919		if (i < 8)
 920			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
 921			       table_addr >> 12);
 922		else
 923			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
 924			       table_addr >> 12);
 925	}
 926
 927	/* enable context1-15 */
 928	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
 929	       (u32)(adev->dummy_page_addr >> 12));
 930	WREG32(mmVM_CONTEXT1_CNTL2, 4);
 931	tmp = RREG32(mmVM_CONTEXT1_CNTL);
 932	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
 933	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
 934	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 935	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 936	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 937	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 938	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 939	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 940	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 941	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
 942			    adev->vm_manager.block_size - 9);
 943	WREG32(mmVM_CONTEXT1_CNTL, tmp);
 944	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
 945		gmc_v8_0_set_fault_enable_default(adev, false);
 946	else
 947		gmc_v8_0_set_fault_enable_default(adev, true);
 948
 949	gmc_v8_0_flush_gpu_tlb(adev, 0, 0, 0);
 950	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
 951		 (unsigned)(adev->gmc.gart_size >> 20),
 952		 (unsigned long long)table_addr);
 953	return 0;
 954}
 955
 956static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
 957{
 958	int r;
 959
 960	if (adev->gart.bo) {
 961		WARN(1, "VI PCIE GART already initialized\n");
 962		return 0;
 963	}
 964	/* Initialize common gart structure */
 965	r = amdgpu_gart_init(adev);
 966	if (r)
 967		return r;
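	/* one 8-byte PTE per GPU page */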
 968	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
 969	adev->gart.gart_pte_flags = AMDGPU_PTE_EXECUTABLE;
 970	return amdgpu_gart_table_vram_alloc(adev);
 971}
 972
 973/**
 974 * gmc_v8_0_gart_disable - gart disable
 975 *
 976 * @adev: amdgpu_device pointer
 977 *
 978 * This disables all VM page tables (VI).
 979 */
 980static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
 981{
 982	u32 tmp;
 983
 984	/* Disable all tables */
 985	WREG32(mmVM_CONTEXT0_CNTL, 0);
 986	WREG32(mmVM_CONTEXT1_CNTL, 0);
 987	/* Setup TLB control */
 988	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
 989	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
 990	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
 991	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
 992	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
 993	/* Setup L2 cache */
 994	tmp = RREG32(mmVM_L2_CNTL);
 995	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
 996	WREG32(mmVM_L2_CNTL, tmp);
 997	WREG32(mmVM_L2_CNTL2, 0);
 998}
 999
1000/**
1001 * gmc_v8_0_vm_decode_fault - print human readable fault info
1002 *
1003 * @adev: amdgpu_device pointer
1004 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
1005 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
1006 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
1007 * @pasid: debug logging only - no functional use
1008 *
1009 * Print human readable fault information (VI).
1010 */
1011static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
1012				     u32 addr, u32 mc_client, unsigned pasid)
1013{
1014	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
1015	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
1016					PROTECTIONS);
1017	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
1018		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
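	/* e.g. a hypothetical MCCLIENT value of 0x43423000 unpacks to "CB0" */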
1019	u32 mc_id;
1020
1021	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
1022			      MEMORY_CLIENT_ID);
1023
1024	dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
1025	       protections, vmid, pasid, addr,
1026	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
1027			     MEMORY_CLIENT_RW) ?
1028	       "write" : "read", block, mc_client, mc_id);
1029}
1030
1031static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
1032{
1033	switch (mc_seq_vram_type) {
1034	case MC_SEQ_MISC0__MT__GDDR1:
1035		return AMDGPU_VRAM_TYPE_GDDR1;
1036	case MC_SEQ_MISC0__MT__DDR2:
1037		return AMDGPU_VRAM_TYPE_DDR2;
1038	case MC_SEQ_MISC0__MT__GDDR3:
1039		return AMDGPU_VRAM_TYPE_GDDR3;
1040	case MC_SEQ_MISC0__MT__GDDR4:
1041		return AMDGPU_VRAM_TYPE_GDDR4;
1042	case MC_SEQ_MISC0__MT__GDDR5:
1043		return AMDGPU_VRAM_TYPE_GDDR5;
1044	case MC_SEQ_MISC0__MT__HBM:
1045		return AMDGPU_VRAM_TYPE_HBM;
1046	case MC_SEQ_MISC0__MT__DDR3:
1047		return AMDGPU_VRAM_TYPE_DDR3;
1048	default:
1049		return AMDGPU_VRAM_TYPE_UNKNOWN;
1050	}
1051}
1052
1053static int gmc_v8_0_early_init(void *handle)
1054{
1055	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1056
1057	gmc_v8_0_set_gmc_funcs(adev);
1058	gmc_v8_0_set_irq_funcs(adev);
1059
1060	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
1061	adev->gmc.shared_aperture_end =
1062		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
1063	adev->gmc.private_aperture_start =
1064		adev->gmc.shared_aperture_end + 1;
1065	adev->gmc.private_aperture_end =
1066		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
1067
1068	return 0;
1069}
1070
1071static int gmc_v8_0_late_init(void *handle)
1072{
1073	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1074
1075	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
1076		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
1077	else
1078		return 0;
1079}
1080
1081static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
1082{
1083	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
1084	unsigned size;
1085
1086	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
1087		size = AMDGPU_VBIOS_VGA_ALLOCATION;
1088	} else {
1089		u32 viewport = RREG32(mmVIEWPORT_SIZE);
1090		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
1091			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
1092			4);
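		/* e.g. a 1920x1080 scanout: 1920 * 1080 * 4 bytes, ~8 MB */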
1093	}
1094
1095	return size;
1096}
1097
1098#define mmMC_SEQ_MISC0_FIJI 0xA71
1099
1100static int gmc_v8_0_sw_init(void *handle)
1101{
1102	int r;
1103	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1104
1105	adev->num_vmhubs = 1;
1106
1107	if (adev->flags & AMD_IS_APU) {
1108		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
1109	} else {
1110		u32 tmp;
1111
1112		if ((adev->asic_type == CHIP_FIJI) ||
1113		    (adev->asic_type == CHIP_VEGAM))
1114			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
1115		else
1116			tmp = RREG32(mmMC_SEQ_MISC0);
1117		tmp &= MC_SEQ_MISC0__MT__MASK;
1118		adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
1119	}
1120
1121	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
1122	if (r)
1123		return r;
1124
1125	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
1126	if (r)
1127		return r;
1128
1129	/* Adjust VM size here.
1130	 * The call below requests a 64GB default VM size.
1131	 * The max GPUVM address width on VI is 40 bits.
1132	 */
1133	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);
1134
1135	/* Set the internal MC address mask
1136	 * This is the max address of the GPU's
1137	 * internal address space.
1138	 */
1139	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
1140
1141	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
1142	if (r) {
1143		pr_warn("No suitable DMA available\n");
1144		return r;
1145	}
1146	adev->need_swiotlb = drm_need_swiotlb(40);
1147
1148	r = gmc_v8_0_init_microcode(adev);
1149	if (r) {
1150		DRM_ERROR("Failed to load mc firmware!\n");
1151		return r;
1152	}
1153
1154	r = gmc_v8_0_mc_init(adev);
1155	if (r)
1156		return r;
1157
1158	amdgpu_gmc_get_vbios_allocations(adev);
1159
1160	/* Memory manager */
1161	r = amdgpu_bo_init(adev);
1162	if (r)
1163		return r;
1164
1165	r = gmc_v8_0_gart_init(adev);
1166	if (r)
1167		return r;
1168
1169	/*
1170	 * number of VMs
1171	 * VMID 0 is reserved for System
1172	 * amdgpu graphics/compute will use VMIDs 1-7
1173	 * amdkfd will use VMIDs 8-15
1174	 */
1175	adev->vm_manager.first_kfd_vmid = 8;
1176	amdgpu_vm_manager_init(adev);
1177
1178	/* base offset of vram pages */
1179	if (adev->flags & AMD_IS_APU) {
1180		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
1181
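		/* MC_VM_FB_OFFSET counts in 4MB (1 << 22) units */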
1182		tmp <<= 22;
1183		adev->vm_manager.vram_base_offset = tmp;
1184	} else {
1185		adev->vm_manager.vram_base_offset = 0;
1186	}
1187
1188	adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
1189					GFP_KERNEL);
1190	if (!adev->gmc.vm_fault_info)
1191		return -ENOMEM;
1192	atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1193
1194	return 0;
1195}
1196
1197static int gmc_v8_0_sw_fini(void *handle)
1198{
1199	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1200
1201	amdgpu_gem_force_release(adev);
1202	amdgpu_vm_manager_fini(adev);
1203	kfree(adev->gmc.vm_fault_info);
1204	amdgpu_gart_table_vram_free(adev);
1205	amdgpu_bo_fini(adev);
1206	release_firmware(adev->gmc.fw);
1207	adev->gmc.fw = NULL;
1208
1209	return 0;
1210}
1211
1212static int gmc_v8_0_hw_init(void *handle)
1213{
1214	int r;
1215	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1216
1217	gmc_v8_0_init_golden_registers(adev);
1218
1219	gmc_v8_0_mc_program(adev);
1220
1221	if (adev->asic_type == CHIP_TONGA) {
1222		r = gmc_v8_0_tonga_mc_load_microcode(adev);
1223		if (r) {
1224			DRM_ERROR("Failed to load MC firmware!\n");
1225			return r;
1226		}
1227	} else if (adev->asic_type == CHIP_POLARIS11 ||
1228			adev->asic_type == CHIP_POLARIS10 ||
1229			adev->asic_type == CHIP_POLARIS12) {
1230		r = gmc_v8_0_polaris_mc_load_microcode(adev);
1231		if (r) {
1232			DRM_ERROR("Failed to load MC firmware!\n");
1233			return r;
1234		}
1235	}
1236
1237	r = gmc_v8_0_gart_enable(adev);
1238	if (r)
1239		return r;
1240
1241	if (amdgpu_emu_mode == 1)
1242		return amdgpu_gmc_vram_checking(adev);
1243	else
1244		return r;
1245}
1246
1247static int gmc_v8_0_hw_fini(void *handle)
1248{
1249	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1250
1251	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
1252	gmc_v8_0_gart_disable(adev);
1253
1254	return 0;
1255}
1256
1257static int gmc_v8_0_suspend(void *handle)
1258{
1259	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1260
1261	gmc_v8_0_hw_fini(adev);
1262
1263	return 0;
1264}
1265
1266static int gmc_v8_0_resume(void *handle)
1267{
1268	int r;
1269	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1270
1271	r = gmc_v8_0_hw_init(adev);
1272	if (r)
1273		return r;
1274
1275	amdgpu_vmid_reset_all(adev);
1276
1277	return 0;
1278}
1279
1280static bool gmc_v8_0_is_idle(void *handle)
1281{
1282	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1283	u32 tmp = RREG32(mmSRBM_STATUS);
1284
1285	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1286		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
1287		return false;
1288
1289	return true;
1290}
1291
1292static int gmc_v8_0_wait_for_idle(void *handle)
1293{
1294	unsigned i;
1295	u32 tmp;
1296	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1297
1298	for (i = 0; i < adev->usec_timeout; i++) {
1299		/* read MC_STATUS */
1300		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
1301					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1302					       SRBM_STATUS__MCC_BUSY_MASK |
1303					       SRBM_STATUS__MCD_BUSY_MASK |
1304					       SRBM_STATUS__VMC_BUSY_MASK |
1305					       SRBM_STATUS__VMC1_BUSY_MASK);
1306		if (!tmp)
1307			return 0;
1308		udelay(1);
1309	}
1310	return -ETIMEDOUT;
1311
1312}
1313
1314static bool gmc_v8_0_check_soft_reset(void *handle)
1315{
1316	u32 srbm_soft_reset = 0;
1317	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1318	u32 tmp = RREG32(mmSRBM_STATUS);
1319
1320	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
1321		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1322						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
1323
1324	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1325		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
1326		if (!(adev->flags & AMD_IS_APU))
1327			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1328							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
1329	}
1330	if (srbm_soft_reset) {
1331		adev->gmc.srbm_soft_reset = srbm_soft_reset;
1332		return true;
1333	} else {
1334		adev->gmc.srbm_soft_reset = 0;
1335		return false;
1336	}
1337}
1338
1339static int gmc_v8_0_pre_soft_reset(void *handle)
1340{
1341	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1342
1343	if (!adev->gmc.srbm_soft_reset)
1344		return 0;
1345
1346	gmc_v8_0_mc_stop(adev);
1347	if (gmc_v8_0_wait_for_idle(adev)) {
1348		dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
1349	}
1350
1351	return 0;
1352}
1353
1354static int gmc_v8_0_soft_reset(void *handle)
1355{
1356	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1357	u32 srbm_soft_reset;
1358
1359	if (!adev->gmc.srbm_soft_reset)
1360		return 0;
1361	srbm_soft_reset = adev->gmc.srbm_soft_reset;
1362
1363	if (srbm_soft_reset) {
1364		u32 tmp;
1365
1366		tmp = RREG32(mmSRBM_SOFT_RESET);
1367		tmp |= srbm_soft_reset;
1368		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1369		WREG32(mmSRBM_SOFT_RESET, tmp);
1370		tmp = RREG32(mmSRBM_SOFT_RESET);
1371
1372		udelay(50);
1373
1374		tmp &= ~srbm_soft_reset;
1375		WREG32(mmSRBM_SOFT_RESET, tmp);
1376		tmp = RREG32(mmSRBM_SOFT_RESET);
1377
1378		/* Wait a little for things to settle down */
1379		udelay(50);
1380	}
1381
1382	return 0;
1383}
1384
1385static int gmc_v8_0_post_soft_reset(void *handle)
1386{
1387	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1388
1389	if (!adev->gmc.srbm_soft_reset)
1390		return 0;
1391
1392	gmc_v8_0_mc_resume(adev);
1393	return 0;
1394}
1395
1396static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
1397					     struct amdgpu_irq_src *src,
1398					     unsigned type,
1399					     enum amdgpu_interrupt_state state)
1400{
1401	u32 tmp;
1402	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1403		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1404		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1405		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1406		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1407		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1408		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
1409
1410	switch (state) {
1411	case AMDGPU_IRQ_STATE_DISABLE:
1412		/* system context */
1413		tmp = RREG32(mmVM_CONTEXT0_CNTL);
1414		tmp &= ~bits;
1415		WREG32(mmVM_CONTEXT0_CNTL, tmp);
1416		/* VMs */
1417		tmp = RREG32(mmVM_CONTEXT1_CNTL);
1418		tmp &= ~bits;
1419		WREG32(mmVM_CONTEXT1_CNTL, tmp);
1420		break;
1421	case AMDGPU_IRQ_STATE_ENABLE:
1422		/* system context */
1423		tmp = RREG32(mmVM_CONTEXT0_CNTL);
1424		tmp |= bits;
1425		WREG32(mmVM_CONTEXT0_CNTL, tmp);
1426		/* VMs */
1427		tmp = RREG32(mmVM_CONTEXT1_CNTL);
1428		tmp |= bits;
1429		WREG32(mmVM_CONTEXT1_CNTL, tmp);
1430		break;
1431	default:
1432		break;
1433	}
1434
1435	return 0;
1436}
1437
1438static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
1439				      struct amdgpu_irq_src *source,
1440				      struct amdgpu_iv_entry *entry)
1441{
1442	u32 addr, status, mc_client, vmid;
1443
1444	if (amdgpu_sriov_vf(adev)) {
1445		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
1446			entry->src_id, entry->src_data[0]);
1447		dev_err(adev->dev, " Can't decode VM fault info here on SRIOV VF\n");
1448		return 0;
1449	}
1450
1451	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
1452	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
1453	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
1454	/* reset addr and status */
1455	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
1456
1457	if (!addr && !status)
1458		return 0;
1459
1460	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
1461		gmc_v8_0_set_fault_enable_default(adev, false);
1462
1463	if (printk_ratelimit()) {
1464		struct amdgpu_task_info task_info;
1465
1466		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
1467		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
1468
1469		dev_err(adev->dev, "GPU fault detected: %d 0x%08x for process %s pid %d thread %s pid %d\n",
1470			entry->src_id, entry->src_data[0], task_info.process_name,
1471			task_info.tgid, task_info.task_name, task_info.pid);
1472		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
1473			addr);
1474		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1475			status);
1476		gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client,
1477					 entry->pasid);
1478	}
1479
1480	vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
1481			     VMID);
1482	if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
1483		&& !atomic_read(&adev->gmc.vm_fault_info_updated)) {
1484		struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
1485		u32 protections = REG_GET_FIELD(status,
1486					VM_CONTEXT1_PROTECTION_FAULT_STATUS,
1487					PROTECTIONS);
1488
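		/* the PROTECTIONS field packs the fault type: bits 0-2 flag
		 * an invalid/unmapped access, bits 3-5 read/write/execute,
		 * matching the masks applied below
		 */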
1489		info->vmid = vmid;
1490		info->mc_id = REG_GET_FIELD(status,
1491					    VM_CONTEXT1_PROTECTION_FAULT_STATUS,
1492					    MEMORY_CLIENT_ID);
1493		info->status = status;
1494		info->page_addr = addr;
1495		info->prot_valid = protections & 0x7 ? true : false;
1496		info->prot_read = protections & 0x8 ? true : false;
1497		info->prot_write = protections & 0x10 ? true : false;
1498		info->prot_exec = protections & 0x20 ? true : false;
1499		mb();
1500		atomic_set(&adev->gmc.vm_fault_info_updated, 1);
1501	}
1502
1503	return 0;
1504}
1505
1506static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
1507						     bool enable)
1508{
1509	uint32_t data;
1510
1511	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
1512		data = RREG32(mmMC_HUB_MISC_HUB_CG);
1513		data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
1514		WREG32(mmMC_HUB_MISC_HUB_CG, data);
1515
1516		data = RREG32(mmMC_HUB_MISC_SIP_CG);
1517		data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
1518		WREG32(mmMC_HUB_MISC_SIP_CG, data);
1519
1520		data = RREG32(mmMC_HUB_MISC_VM_CG);
1521		data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
1522		WREG32(mmMC_HUB_MISC_VM_CG, data);
1523
1524		data = RREG32(mmMC_XPB_CLK_GAT);
1525		data |= MC_XPB_CLK_GAT__ENABLE_MASK;
1526		WREG32(mmMC_XPB_CLK_GAT, data);
1527
1528		data = RREG32(mmATC_MISC_CG);
1529		data |= ATC_MISC_CG__ENABLE_MASK;
1530		WREG32(mmATC_MISC_CG, data);
1531
1532		data = RREG32(mmMC_CITF_MISC_WR_CG);
1533		data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
1534		WREG32(mmMC_CITF_MISC_WR_CG, data);
1535
1536		data = RREG32(mmMC_CITF_MISC_RD_CG);
1537		data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
1538		WREG32(mmMC_CITF_MISC_RD_CG, data);
1539
1540		data = RREG32(mmMC_CITF_MISC_VM_CG);
1541		data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
1542		WREG32(mmMC_CITF_MISC_VM_CG, data);
1543
1544		data = RREG32(mmVM_L2_CG);
1545		data |= VM_L2_CG__ENABLE_MASK;
1546		WREG32(mmVM_L2_CG, data);
1547	} else {
1548		data = RREG32(mmMC_HUB_MISC_HUB_CG);
1549		data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
1550		WREG32(mmMC_HUB_MISC_HUB_CG, data);
1551
1552		data = RREG32(mmMC_HUB_MISC_SIP_CG);
1553		data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
1554		WREG32(mmMC_HUB_MISC_SIP_CG, data);
1555
1556		data = RREG32(mmMC_HUB_MISC_VM_CG);
1557		data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
1558		WREG32(mmMC_HUB_MISC_VM_CG, data);
1559
1560		data = RREG32(mmMC_XPB_CLK_GAT);
1561		data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
1562		WREG32(mmMC_XPB_CLK_GAT, data);
1563
1564		data = RREG32(mmATC_MISC_CG);
1565		data &= ~ATC_MISC_CG__ENABLE_MASK;
1566		WREG32(mmATC_MISC_CG, data);
1567
1568		data = RREG32(mmMC_CITF_MISC_WR_CG);
1569		data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
1570		WREG32(mmMC_CITF_MISC_WR_CG, data);
1571
1572		data = RREG32(mmMC_CITF_MISC_RD_CG);
1573		data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
1574		WREG32(mmMC_CITF_MISC_RD_CG, data);
1575
1576		data = RREG32(mmMC_CITF_MISC_VM_CG);
1577		data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
1578		WREG32(mmMC_CITF_MISC_VM_CG, data);
1579
1580		data = RREG32(mmVM_L2_CG);
1581		data &= ~VM_L2_CG__ENABLE_MASK;
1582		WREG32(mmVM_L2_CG, data);
1583	}
1584}
1585
1586static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
1587				       bool enable)
1588{
1589	uint32_t data;
1590
1591	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
1592		data = RREG32(mmMC_HUB_MISC_HUB_CG);
1593		data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
1594		WREG32(mmMC_HUB_MISC_HUB_CG, data);
1595
1596		data = RREG32(mmMC_HUB_MISC_SIP_CG);
1597		data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
1598		WREG32(mmMC_HUB_MISC_SIP_CG, data);
1599
1600		data = RREG32(mmMC_HUB_MISC_VM_CG);
1601		data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1602		WREG32(mmMC_HUB_MISC_VM_CG, data);
1603
1604		data = RREG32(mmMC_XPB_CLK_GAT);
1605		data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
1606		WREG32(mmMC_XPB_CLK_GAT, data);
1607
1608		data = RREG32(mmATC_MISC_CG);
1609		data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
1610		WREG32(mmATC_MISC_CG, data);
1611
1612		data = RREG32(mmMC_CITF_MISC_WR_CG);
1613		data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
1614		WREG32(mmMC_CITF_MISC_WR_CG, data);
1615
1616		data = RREG32(mmMC_CITF_MISC_RD_CG);
1617		data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
1618		WREG32(mmMC_CITF_MISC_RD_CG, data);
1619
1620		data = RREG32(mmMC_CITF_MISC_VM_CG);
1621		data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1622		WREG32(mmMC_CITF_MISC_VM_CG, data);
1623
1624		data = RREG32(mmVM_L2_CG);
1625		data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
1626		WREG32(mmVM_L2_CG, data);
1627	} else {
1628		data = RREG32(mmMC_HUB_MISC_HUB_CG);
1629		data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
1630		WREG32(mmMC_HUB_MISC_HUB_CG, data);
1631
1632		data = RREG32(mmMC_HUB_MISC_SIP_CG);
1633		data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
1634		WREG32(mmMC_HUB_MISC_SIP_CG, data);
1635
1636		data = RREG32(mmMC_HUB_MISC_VM_CG);
1637		data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1638		WREG32(mmMC_HUB_MISC_VM_CG, data);
1639
1640		data = RREG32(mmMC_XPB_CLK_GAT);
1641		data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
1642		WREG32(mmMC_XPB_CLK_GAT, data);
1643
1644		data = RREG32(mmATC_MISC_CG);
1645		data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
1646		WREG32(mmATC_MISC_CG, data);
1647
1648		data = RREG32(mmMC_CITF_MISC_WR_CG);
1649		data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
1650		WREG32(mmMC_CITF_MISC_WR_CG, data);
1651
1652		data = RREG32(mmMC_CITF_MISC_RD_CG);
1653		data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
1654		WREG32(mmMC_CITF_MISC_RD_CG, data);
1655
1656		data = RREG32(mmMC_CITF_MISC_VM_CG);
1657		data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1658		WREG32(mmMC_CITF_MISC_VM_CG, data);
1659
1660		data = RREG32(mmVM_L2_CG);
1661		data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
1662		WREG32(mmVM_L2_CG, data);
1663	}
1664}
1665
1666static int gmc_v8_0_set_clockgating_state(void *handle,
1667					  enum amd_clockgating_state state)
1668{
1669	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1670
1671	if (amdgpu_sriov_vf(adev))
1672		return 0;
1673
1674	switch (adev->asic_type) {
1675	case CHIP_FIJI:
1676		fiji_update_mc_medium_grain_clock_gating(adev,
1677				state == AMD_CG_STATE_GATE);
1678		fiji_update_mc_light_sleep(adev,
1679				state == AMD_CG_STATE_GATE);
1680		break;
1681	default:
1682		break;
1683	}
1684	return 0;
1685}
1686
1687static int gmc_v8_0_set_powergating_state(void *handle,
1688					  enum amd_powergating_state state)
1689{
1690	return 0;
1691}
1692
1693static void gmc_v8_0_get_clockgating_state(void *handle, u64 *flags)
1694{
1695	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1696	int data;
1697
1698	if (amdgpu_sriov_vf(adev))
1699		*flags = 0;
1700
1701	/* AMD_CG_SUPPORT_MC_MGCG */
1702	data = RREG32(mmMC_HUB_MISC_HUB_CG);
1703	if (data & MC_HUB_MISC_HUB_CG__ENABLE_MASK)
1704		*flags |= AMD_CG_SUPPORT_MC_MGCG;
1705
1706	/* AMD_CG_SUPPORT_MC_LS */
1707	if (data & MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK)
1708		*flags |= AMD_CG_SUPPORT_MC_LS;
1709}
1710
1711static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
1712	.name = "gmc_v8_0",
1713	.early_init = gmc_v8_0_early_init,
1714	.late_init = gmc_v8_0_late_init,
1715	.sw_init = gmc_v8_0_sw_init,
1716	.sw_fini = gmc_v8_0_sw_fini,
1717	.hw_init = gmc_v8_0_hw_init,
1718	.hw_fini = gmc_v8_0_hw_fini,
1719	.suspend = gmc_v8_0_suspend,
1720	.resume = gmc_v8_0_resume,
1721	.is_idle = gmc_v8_0_is_idle,
1722	.wait_for_idle = gmc_v8_0_wait_for_idle,
1723	.check_soft_reset = gmc_v8_0_check_soft_reset,
1724	.pre_soft_reset = gmc_v8_0_pre_soft_reset,
1725	.soft_reset = gmc_v8_0_soft_reset,
1726	.post_soft_reset = gmc_v8_0_post_soft_reset,
1727	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
1728	.set_powergating_state = gmc_v8_0_set_powergating_state,
1729	.get_clockgating_state = gmc_v8_0_get_clockgating_state,
1730};
1731
1732static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
1733	.flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
1734	.flush_gpu_tlb_pasid = gmc_v8_0_flush_gpu_tlb_pasid,
1735	.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
1736	.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
1737	.set_prt = gmc_v8_0_set_prt,
1738	.get_vm_pde = gmc_v8_0_get_vm_pde,
1739	.get_vm_pte = gmc_v8_0_get_vm_pte,
1740	.get_vbios_fb_size = gmc_v8_0_get_vbios_fb_size,
1741};
1742
1743static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
1744	.set = gmc_v8_0_vm_fault_interrupt_state,
1745	.process = gmc_v8_0_process_interrupt,
1746};
1747
1748static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
1749{
1750	adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
1751}
1752
1753static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
1754{
1755	adev->gmc.vm_fault.num_types = 1;
1756	adev->gmc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
1757}
1758
1759const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
1760{
1761	.type = AMD_IP_BLOCK_TYPE_GMC,
1762	.major = 8,
1763	.minor = 0,
1764	.rev = 0,
1765	.funcs = &gmc_v8_0_ip_funcs,
1766};
1767
1768const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
1769{
1770	.type = AMD_IP_BLOCK_TYPE_GMC,
1771	.major = 8,
1772	.minor = 1,
1773	.rev = 0,
1774	.funcs = &gmc_v8_0_ip_funcs,
1775};
1776
1777const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
1778{
1779	.type = AMD_IP_BLOCK_TYPE_GMC,
1780	.major = 8,
1781	.minor = 5,
1782	.rev = 0,
1783	.funcs = &gmc_v8_0_ip_funcs,
1784};