/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "cikd.h"
#include "cik.h"
#include "gmc_v7_0.h"
#include "amdgpu_ucode.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "amdgpu_atombios.h"

#include "ivsrcid/ivsrcid_vislands30.h"

static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block);

MODULE_FIRMWARE("amdgpu/bonaire_mc.bin");
MODULE_FIRMWARE("amdgpu/hawaii_mc.bin");
MODULE_FIRMWARE("amdgpu/topaz_mc.bin");

static const u32 golden_settings_iceland_a11[] = {
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};
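
/* each golden-register entry is a {register, and_mask, or_mask} triplet;
 * amdgpu_device_program_register_sequence() clears the and_mask bits of
 * the current value and ORs in (or_mask & and_mask)
 */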

static const u32 iceland_mgcg_cgcg_init[] = {
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_iceland_a11,
							ARRAY_SIZE(golden_settings_iceland_a11));
		break;
	default:
		break;
	}
}

static void gmc_v7_0_mc_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ip_block *ip_block;
	u32 blackout;

	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
	if (!ip_block)
		return;

	gmc_v7_0_wait_for_idle(ip_block);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
	}
	/* wait for the MC to settle */
	udelay(100);
}

static void gmc_v7_0_mc_resume(struct amdgpu_device *adev)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);
}

/**
 * gmc_v7_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_TOPAZ:
		chip_name = "topaz";
		break;
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		return 0;
	default:
		return -EINVAL;
	}

	err = amdgpu_ucode_request(adev, &adev->gmc.fw, "amdgpu/%s_mc.bin", chip_name);
	if (err) {
		pr_err("cik_mc: Failed to load firmware \"%s_mc.bin\"\n", chip_name);
		amdgpu_ucode_release(&adev->gmc.fw);
	}
	return err;
}

/**
 * gmc_v7_0_mc_load_microcode - load MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (CIK).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
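	/* the io_debug payload is stored as (MC_SEQ_IO_DEBUG_INDEX,
	 * MC_SEQ_IO_DEBUG_DATA) dword pairs, hence bytes / (4 * 2)
	 */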
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}
	}

	return 0;
}

static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
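	/* MC_VM_FB_LOCATION.FB_BASE holds bits 47:24 of the VRAM base
	 * address, so keep 16 bits and shift left by 24 to recover the
	 * byte address (inferred from the mask and shift below)
	 */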
	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;

	base <<= 24;

	amdgpu_gmc_set_agp_default(adev, mc);
	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
}

/**
 * gmc_v7_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (CIK).
 */
static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
{
	struct amdgpu_ip_block *ip_block;
	u32 tmp;
	int i, j;

	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
	if (!ip_block)
		return;

	/* Initialize HDP */
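	/* 0xb05..0xb09 are raw HDP register offsets written in 32 groups
	 * with a stride of six dwords; the gmc headers appear to provide
	 * no symbolic names for them, hence the magic numbers
	 */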
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (gmc_v7_0_wait_for_idle(ip_block))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture */
		tmp = RREG32(mmVGA_HDP_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		WREG32(mmVGA_HDP_CONTROL, tmp);

		/* disable VGA render */
		tmp = RREG32(mmVGA_RENDER_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
		WREG32(mmVGA_RENDER_CONTROL, tmp);
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->gmc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->gmc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->mem_scratch.gpu_addr >> 12);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22);
	WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22);
	if (gmc_v7_0_wait_for_idle(ip_block))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v7_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (CIK).
 * Returns 0 for success.
 */
static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
	if (!adev->gmc.vram_width) {
		u32 tmp;
		int chansize, numchan;

		/* Get VRAM information */
		tmp = RREG32(mmMC_ARB_RAMCFG);
		if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE))
			chansize = 64;
		else
			chansize = 32;

		tmp = RREG32(mmMC_SHARED_CHMAP);
		switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
		case 0:
		default:
			numchan = 1;
			break;
		case 1:
			numchan = 2;
			break;
		case 2:
			numchan = 4;
			break;
		case 3:
			numchan = 8;
			break;
		case 4:
			numchan = 3;
			break;
		case 5:
			numchan = 6;
			break;
		case 6:
			numchan = 10;
			break;
		case 7:
			numchan = 12;
			break;
		case 8:
			numchan = 16;
			break;
		}
		adev->gmc.vram_width = numchan * chansize;
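		/* e.g. four 64-bit channels yield a 256-bit effective bus */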
	}
	/* CONFIG_MEMSIZE reports the VRAM size in MB */
	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) &&
	    adev->gmc.real_vram_size > adev->gmc.aper_size &&
	    !amdgpu_passthrough(adev)) {
		adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	adev->gmc.visible_vram_size = adev->gmc.aper_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_TOPAZ:     /* no MM engines */
		default:
			adev->gmc.gart_size = 256ULL << 20;
			break;
#ifdef CONFIG_DRM_AMDGPU_CIK
		case CHIP_BONAIRE: /* UVD, VCE do not support GPUVM */
		case CHIP_HAWAII:  /* UVD, VCE do not support GPUVM */
		case CHIP_KAVERI:  /* UVD, VCE do not support GPUVM */
		case CHIP_KABINI:  /* UVD, VCE do not support GPUVM */
		case CHIP_MULLINS: /* UVD, VCE do not support GPUVM */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
#endif
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
	gmc_v7_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

/**
 * gmc_v7_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: type of flush
 * @all_hub: flush all hubs
 * @inst: is used to select which instance of KIQ to use for the invalidation
 *
 * Flush the TLB for the requested pasid.
 */
static void gmc_v7_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					 uint16_t pasid, uint32_t flush_type,
					 bool all_hub, uint32_t inst)
{
	u32 mask = 0x0;
	int vmid;

	for (vmid = 1; vmid < 16; vmid++) {
		u32 tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);

		if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
		    (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid)
			mask |= 1 << vmid;
	}

	WREG32(mmVM_INVALIDATE_REQUEST, mask);
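	/* read back the response register so the invalidate request is
	 * posted before returning; the value itself is not checked
	 */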
	RREG32(mmVM_INVALIDATE_RESPONSE);
}

/*
 * GART
 * VMID 0 covers the physical GPU address space used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v7_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: type of flush
 *
 * Flush the TLB for the requested page table (CIK).
 */
static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
					uint32_t vmhub, uint32_t flush_type)
{
	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

static uint64_t gmc_v7_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	uint32_t reg;

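	/* the 16 per-VMID page table base registers sit in two banks:
	 * contexts 0-7 start at mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR and
	 * contexts 8-15 at mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR, so e.g.
	 * vmid 11 maps to mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + 3
	 */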
	if (vmid < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);

	/* bits 0-15 are the VM contexts0-15 */
	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);

	return pd_addr;
}

static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
					unsigned int pasid)
{
	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}

static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
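	/* a valid PDE address must be 4K-aligned and fit in the 40-bit MC
	 * address space, i.e. bits 11:0 and 63:40 must be clear; the mask
	 * below checks exactly those bits
	 */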
	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}

static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags &= ~AMDGPU_PTE_PRT;
}

/**
 * gmc_v7_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v7_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v7_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	uint32_t tmp;

	if (enable && !adev->gmc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->gmc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    MASK_PDE0_FAULT, enable);
	WREG32(mmVM_PRT_CNTL, tmp);

	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_BOTTOM >>
			AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn -
			(AMDGPU_VA_RESERVED_TOP >> AMDGPU_GPU_PAGE_SHIFT);

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}

/**
 * gmc_v7_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (CIK).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
{
	uint64_t table_addr;
	u32 tmp, field;
	int i;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
	table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);

	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);

	field = adev->vm_manager.fragment_size;
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

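	/* 0x575..0x577 are raw register offsets with no symbolic names in
	 * the gmc headers; they are simply cleared here
	 */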
	WREG32(0x575, 0);
	WREG32(0x576, 0);
	WREG32(0x577, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < AMDGPU_NUM_VMID; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
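	/* PAGE_TABLE_BLOCK_SIZE appears to be encoded relative to the
	 * minimum 512 (2^9) pages per block, hence the subtraction of 9
	 * from the driver's log2 block size below
	 */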
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    adev->vm_manager.block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v7_0_set_fault_enable_default(adev, false);
	else
		gmc_v7_0_set_fault_enable_default(adev, true);

	if (adev->asic_type == CHIP_KAVERI) {
		tmp = RREG32(mmCHUB_CONTROL);
		tmp &= ~BYPASS_VM;
		WREG32(mmCHUB_CONTROL, tmp);
	}

	gmc_v7_0_flush_gpu_tlb(adev, 0, 0, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned int)(adev->gmc.gart_size >> 20),
		 (unsigned long long)table_addr);
	return 0;
}

static int gmc_v7_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = 0;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v7_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (CIK).
 */
static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
}

/**
 * gmc_v7_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 * @pasid: debug logging only - no functional use
 *
 * Print human readable fault information (CIK).
 */
static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
				     u32 addr, u32 mc_client, unsigned int pasid)
{
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
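	/* mc_client packs a four-character ASCII client tag into a u32,
	 * most significant byte first; unpack it into a C string
	 */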
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
	u32 mc_id;

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
	       protections, vmid, pasid, addr,
	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     MEMORY_CLIENT_RW) ?
	       "write" : "read", block, mc_client, mc_id);
}

static const u32 mc_cg_registers[] = {
	mmMC_HUB_MISC_HUB_CG,
	mmMC_HUB_MISC_SIP_CG,
	mmMC_HUB_MISC_VM_CG,
	mmMC_XPB_CLK_GAT,
	mmATC_MISC_CG,
	mmMC_CITF_MISC_WR_CG,
	mmMC_CITF_MISC_RD_CG,
	mmMC_CITF_MISC_VM_CG,
	mmVM_L2_CG,
};

static const u32 mc_cg_ls_en[] = {
	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	VM_L2_CG__MEM_LS_ENABLE_MASK,
};

static const u32 mc_cg_en[] = {
	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
	MC_HUB_MISC_VM_CG__ENABLE_MASK,
	MC_XPB_CLK_GAT__ENABLE_MASK,
	ATC_MISC_CG__ENABLE_MASK,
	MC_CITF_MISC_WR_CG__ENABLE_MASK,
	MC_CITF_MISC_RD_CG__ENABLE_MASK,
	MC_CITF_MISC_VM_CG__ENABLE_MASK,
	VM_L2_CG__ENABLE_MASK,
};
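
/* the three arrays above are index-aligned: mc_cg_registers[i] is
 * gated by the mc_cg_ls_en[i] and mc_cg_en[i] bit masks
 */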

static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev,
				  bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
			data |= mc_cg_ls_en[i];
		else
			data &= ~mc_cg_ls_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev,
				    bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
			data |= mc_cg_en[i];
		else
			data &= ~mc_cg_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
	} else {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
	}

	if (orig != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
	else
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);

	if (orig != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev,
				   bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
	else
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);

	if (orig != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static int gmc_v7_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v7_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	gmc_v7_0_set_gmc_funcs(adev);
	gmc_v7_0_set_irq_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start =
		adev->gmc.shared_aperture_end + 1;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
	adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;

	return 0;
}

static int gmc_v7_0_late_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
	else
		return 0;
}

static unsigned int gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
	unsigned int size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport = RREG32(mmVIEWPORT_SIZE);

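		/* framebuffer bytes = width * height * 4, assuming a
		 * 32 bits-per-pixel scanout surface
		 */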
		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
			4);
	}

	return size;
}

static int gmc_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	int r;
	struct amdgpu_device *adev = ip_block->adev;

	set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);

	if (adev->flags & AMD_IS_APU) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp = RREG32(mmMC_SEQ_MISC0);

		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * The default VM size requested below is 64GB.
	 * Max GPUVM size for CIK is 40 bits.
	 */
	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);
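	/* the arguments are (adev, min VM size in GB, default fragment
	 * size as log2(4K pages), number of GPUVM page table levels,
	 * max address bits) - see amdgpu_vm_adjust_size()
	 */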

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
	if (r) {
		pr_warn("No suitable DMA available\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(40);

	r = gmc_v7_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v7_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v7_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
					GFP_KERNEL);
	if (!adev->gmc.vm_fault_info)
		return -ENOMEM;
	atomic_set(&adev->gmc.vm_fault_info_updated, 0);

	return 0;
}

static int gmc_v7_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	kfree(adev->gmc.vm_fault_info);
	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_ucode_release(&adev->gmc.fw);

	return 0;
}

static int gmc_v7_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	int r;
	struct amdgpu_device *adev = ip_block->adev;

	gmc_v7_0_init_golden_registers(adev);

	gmc_v7_0_mc_program(adev);

	if (!(adev->flags & AMD_IS_APU)) {
		r = gmc_v7_0_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v7_0_gart_enable(adev);
	if (r)
		return r;

	if (amdgpu_emu_mode == 1)
		return amdgpu_gmc_vram_checking(adev);

	return 0;
}

static int gmc_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v7_0_gart_disable(adev);

	return 0;
}

static int gmc_v7_0_suspend(struct amdgpu_ip_block *ip_block)
{
	gmc_v7_0_hw_fini(ip_block);

	return 0;
}

static int gmc_v7_0_resume(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = gmc_v7_0_hw_init(ip_block);
	if (r)
		return r;

	amdgpu_vmid_reset_all(ip_block->adev);

	return 0;
}

static bool gmc_v7_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	unsigned int i;
	u32 tmp;
	struct amdgpu_device *adev = ip_block->adev;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int gmc_v7_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v7_0_mc_stop(adev);
		if (gmc_v7_0_wait_for_idle(ip_block))
			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		gmc_v7_0_mc_resume(adev);
		udelay(50);
	}

	return 0;
}

static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned int type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client, vmid;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
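	/* WREG32_P writes (old_value & mask) | value, so this sets bit 0
	 * of VM_CONTEXT1_CNTL2, which appears to be the trigger that
	 * clears the latched fault address/status registers
	 */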
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	amdgpu_vm_update_fault_cache(adev, entry->pasid,
				     ((u64)addr) << AMDGPU_GPU_PAGE_SHIFT, status, AMDGPU_GFXHUB(0));

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v7_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
			addr);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client,
					 entry->pasid);
	}

	vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     VMID);
	if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
		&& !atomic_read(&adev->gmc.vm_fault_info_updated)) {
		struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
		u32 protections = REG_GET_FIELD(status,
					VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);

		info->vmid = vmid;
		info->mc_id = REG_GET_FIELD(status,
					    VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					    MEMORY_CLIENT_ID);
		info->status = status;
		info->page_addr = addr;
		info->prot_valid = protections & 0x7 ? true : false;
		info->prot_read = protections & 0x8 ? true : false;
		info->prot_write = protections & 0x10 ? true : false;
		info->prot_exec = protections & 0x20 ? true : false;
		mb();
		atomic_set(&adev->gmc.vm_fault_info_updated, 1);
	}

	return 0;
}

static int gmc_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	if (!(adev->flags & AMD_IS_APU)) {
		gmc_v7_0_enable_mc_mgcg(adev, gate);
		gmc_v7_0_enable_mc_ls(adev, gate);
	}
	gmc_v7_0_enable_bif_mgls(adev, gate);
	gmc_v7_0_enable_hdp_mgcg(adev, gate);
	gmc_v7_0_enable_hdp_ls(adev, gate);

	return 0;
}

static int gmc_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
	.name = "gmc_v7_0",
	.early_init = gmc_v7_0_early_init,
	.late_init = gmc_v7_0_late_init,
	.sw_init = gmc_v7_0_sw_init,
	.sw_fini = gmc_v7_0_sw_fini,
	.hw_init = gmc_v7_0_hw_init,
	.hw_fini = gmc_v7_0_hw_fini,
	.suspend = gmc_v7_0_suspend,
	.resume = gmc_v7_0_resume,
	.is_idle = gmc_v7_0_is_idle,
	.wait_for_idle = gmc_v7_0_wait_for_idle,
	.soft_reset = gmc_v7_0_soft_reset,
	.set_clockgating_state = gmc_v7_0_set_clockgating_state,
	.set_powergating_state = gmc_v7_0_set_powergating_state,
};

static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v7_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v7_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
	.set_prt = gmc_v7_0_set_prt,
	.get_vm_pde = gmc_v7_0_get_vm_pde,
	.get_vm_pte = gmc_v7_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v7_0_get_vbios_fb_size,
};

static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
	.set = gmc_v7_0_vm_fault_interrupt_state,
	.process = gmc_v7_0_process_interrupt,
};

static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v7_0_gmc_funcs;
}

static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v7_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 7,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v7_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v7_4_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 7,
	.minor = 4,
	.rev = 0,
	.funcs = &gmc_v7_0_ip_funcs,
};
v4.10.11
   1/*
   2 * Copyright 2014 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
 
  23#include <linux/firmware.h>
  24#include "drmP.h"
 
 
 
  25#include "amdgpu.h"
  26#include "cikd.h"
  27#include "cik.h"
  28#include "gmc_v7_0.h"
  29#include "amdgpu_ucode.h"
 
 
  30
  31#include "bif/bif_4_1_d.h"
  32#include "bif/bif_4_1_sh_mask.h"
  33
  34#include "gmc/gmc_7_1_d.h"
  35#include "gmc/gmc_7_1_sh_mask.h"
  36
  37#include "oss/oss_2_0_d.h"
  38#include "oss/oss_2_0_sh_mask.h"
  39
  40static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
 
 
 
 
 
 
 
  41static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
  42static int gmc_v7_0_wait_for_idle(void *handle);
  43
  44MODULE_FIRMWARE("radeon/bonaire_mc.bin");
  45MODULE_FIRMWARE("radeon/hawaii_mc.bin");
  46MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
  47
  48static const u32 golden_settings_iceland_a11[] =
  49{
  50	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  51	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  52	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
  53	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
  54};
  55
  56static const u32 iceland_mgcg_cgcg_init[] =
  57{
  58	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
  59};
  60
  61static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
  62{
  63	switch (adev->asic_type) {
  64	case CHIP_TOPAZ:
  65		amdgpu_program_register_sequence(adev,
  66						 iceland_mgcg_cgcg_init,
  67						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
  68		amdgpu_program_register_sequence(adev,
  69						 golden_settings_iceland_a11,
  70						 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
  71		break;
  72	default:
  73		break;
  74	}
  75}
  76
  77static void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
  78			     struct amdgpu_mode_mc_save *save)
  79{
 
  80	u32 blackout;
  81
  82	if (adev->mode_info.num_crtc)
  83		amdgpu_display_stop_mc_access(adev, save);
 
  84
  85	gmc_v7_0_wait_for_idle((void *)adev);
  86
  87	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
  88	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
  89		/* Block CPU access */
  90		WREG32(mmBIF_FB_EN, 0);
  91		/* blackout the MC */
  92		blackout = REG_SET_FIELD(blackout,
  93					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
  94		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
  95	}
  96	/* wait for the MC to settle */
  97	udelay(100);
  98}
  99
 100static void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
 101			       struct amdgpu_mode_mc_save *save)
 102{
 103	u32 tmp;
 104
 105	/* unblackout the MC */
 106	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
 107	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
 108	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
 109	/* allow CPU access */
 110	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
 111	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
 112	WREG32(mmBIF_FB_EN, tmp);
 113
 114	if (adev->mode_info.num_crtc)
 115		amdgpu_display_resume_mc_access(adev, save);
 116}
 117
 118/**
 119 * gmc_v7_0_init_microcode - load ucode images from disk
 120 *
 121 * @adev: amdgpu_device pointer
 122 *
 123 * Use the firmware interface to load the ucode images into
 124 * the driver (not loaded into hw).
 125 * Returns 0 on success, error on failure.
 126 */
 127static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
 128{
 129	const char *chip_name;
 130	char fw_name[30];
 131	int err;
 132
 133	DRM_DEBUG("\n");
 134
 135	switch (adev->asic_type) {
 136	case CHIP_BONAIRE:
 137		chip_name = "bonaire";
 138		break;
 139	case CHIP_HAWAII:
 140		chip_name = "hawaii";
 141		break;
 142	case CHIP_TOPAZ:
 143		chip_name = "topaz";
 144		break;
 145	case CHIP_KAVERI:
 146	case CHIP_KABINI:
 147	case CHIP_MULLINS:
 148		return 0;
 149	default: BUG();
 
 150	}
 151
 152	if (adev->asic_type == CHIP_TOPAZ)
 153		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
 154	else
 155		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
 156
 157	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
 158	if (err)
 159		goto out;
 160	err = amdgpu_ucode_validate(adev->mc.fw);
 161
 162out:
 163	if (err) {
 164		printk(KERN_ERR
 165		       "cik_mc: Failed to load firmware \"%s\"\n",
 166		       fw_name);
 167		release_firmware(adev->mc.fw);
 168		adev->mc.fw = NULL;
 169	}
 170	return err;
 171}
 172
 173/**
 174 * gmc_v7_0_mc_load_microcode - load MC ucode into the hw
 175 *
 176 * @adev: amdgpu_device pointer
 177 *
 178 * Load the GDDR MC ucode into the hw (CIK).
 179 * Returns 0 on success, error on failure.
 180 */
 181static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
 182{
 183	const struct mc_firmware_header_v1_0 *hdr;
 184	const __le32 *fw_data = NULL;
 185	const __le32 *io_mc_regs = NULL;
 186	u32 running;
 187	int i, ucode_size, regs_size;
 188
 189	if (!adev->mc.fw)
 190		return -EINVAL;
 191
 192	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
 193	amdgpu_ucode_print_mc_hdr(&hdr->header);
 194
 195	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
 196	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
 197	io_mc_regs = (const __le32 *)
 198		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
 199	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 200	fw_data = (const __le32 *)
 201		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 202
 203	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
 204
 205	if (running == 0) {
 206		/* reset the engine and set to writable */
 207		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
 208		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
 209
 210		/* load mc io regs */
 211		for (i = 0; i < regs_size; i++) {
 212			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
 213			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
 214		}
 215		/* load the MC ucode */
 216		for (i = 0; i < ucode_size; i++)
 217			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));
 218
 219		/* put the engine back into the active state */
 220		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
 221		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
 222		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
 223
 224		/* wait for training to complete */
 225		for (i = 0; i < adev->usec_timeout; i++) {
 226			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
 227					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
 228				break;
 229			udelay(1);
 230		}
 231		for (i = 0; i < adev->usec_timeout; i++) {
 232			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
 233					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
 234				break;
 235			udelay(1);
 236		}
 237	}
 238
 239	return 0;
 240}
 241
 242static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
 243				       struct amdgpu_mc *mc)
 244{
 245	if (mc->mc_vram_size > 0xFFC0000000ULL) {
 246		/* leave room for at least 1024M GTT */
 247		dev_warn(adev->dev, "limiting VRAM\n");
 248		mc->real_vram_size = 0xFFC0000000ULL;
 249		mc->mc_vram_size = 0xFFC0000000ULL;
 250	}
 251	amdgpu_vram_location(adev, &adev->mc, 0);
 252	adev->mc.gtt_base_align = 0;
 253	amdgpu_gtt_location(adev, mc);
 254}
 255
 256/**
 257 * gmc_v7_0_mc_program - program the GPU memory controller
 258 *
 259 * @adev: amdgpu_device pointer
 260 *
 261 * Set the location of vram, gart, and AGP in the GPU's
 262 * physical address space (CIK).
 263 */
 264static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
 265{
 266	struct amdgpu_mode_mc_save save;
 267	u32 tmp;
 268	int i, j;
 269
 
 
 
 
 270	/* Initialize HDP */
 271	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
 272		WREG32((0xb05 + j), 0x00000000);
 273		WREG32((0xb06 + j), 0x00000000);
 274		WREG32((0xb07 + j), 0x00000000);
 275		WREG32((0xb08 + j), 0x00000000);
 276		WREG32((0xb09 + j), 0x00000000);
 277	}
 278	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
 279
 280	if (adev->mode_info.num_crtc)
 281		amdgpu_display_set_vga_render_state(adev, false);
 282
 283	gmc_v7_0_mc_stop(adev, &save);
 284	if (gmc_v7_0_wait_for_idle((void *)adev)) {
 285		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
 
 
 
 
 
 
 
 286	}
 287	/* Update configuration */
 288	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
 289	       adev->mc.vram_start >> 12);
 290	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
 291	       adev->mc.vram_end >> 12);
 292	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
 293	       adev->vram_scratch.gpu_addr >> 12);
 294	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
 295	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
 296	WREG32(mmMC_VM_FB_LOCATION, tmp);
 297	/* XXX double check these! */
 298	WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
 299	WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
 300	WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
 301	WREG32(mmMC_VM_AGP_BASE, 0);
 302	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
 303	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
 304	if (gmc_v7_0_wait_for_idle((void *)adev)) {
 305		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
 306	}
 307	gmc_v7_0_mc_resume(adev, &save);
 308
 309	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
 310
 311	tmp = RREG32(mmHDP_MISC_CNTL);
 312	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
 313	WREG32(mmHDP_MISC_CNTL, tmp);
 314
 315	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
 316	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
 317}
 318
 319/**
 320 * gmc_v7_0_mc_init - initialize the memory controller driver params
 321 *
 322 * @adev: amdgpu_device pointer
 323 *
 324 * Look up the amount of vram, vram width, and decide how to place
 325 * vram and gart within the GPU's physical address space (CIK).
 326 * Returns 0 for success.
 327 */
 328static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
 329{
 330	u32 tmp;
 331	int chansize, numchan;
 
 
 
 
 
 
 
 
 
 
 
 332
 333	/* Get VRAM informations */
 334	tmp = RREG32(mmMC_ARB_RAMCFG);
 335	if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
 336		chansize = 64;
 337	} else {
 338		chansize = 32;
 339	}
 340	tmp = RREG32(mmMC_SHARED_CHMAP);
 341	switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
 342	case 0:
 343	default:
 344		numchan = 1;
 345		break;
 346	case 1:
 347		numchan = 2;
 348		break;
 349	case 2:
 350		numchan = 4;
 351		break;
 352	case 3:
 353		numchan = 8;
 354		break;
 355	case 4:
 356		numchan = 3;
 357		break;
 358	case 5:
 359		numchan = 6;
 360		break;
 361	case 6:
 362		numchan = 10;
 363		break;
 364	case 7:
 365		numchan = 12;
 366		break;
 367	case 8:
 368		numchan = 16;
 369		break;
 370	}
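    	/*
    	 * The effective bus width is channels times channel size; for
    	 * instance, 8 x 64-bit channels decode to the 512-bit interface
    	 * found on Hawaii.
    	 */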
 371	adev->mc.vram_width = numchan * chansize;
 372	/* Could the aperture size report 0? */
 373	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
 374	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
 375	/* mmCONFIG_MEMSIZE reports the VRAM size in MB */
 376	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 377	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 378	adev->mc.visible_vram_size = adev->mc.aper_size;
 379
 380	/* In case the PCI BAR is larger than the actual amount of vram */
 381	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
 382		adev->mc.visible_vram_size = adev->mc.real_vram_size;
 383
 384	/* unless the user has overridden it, set the gart
 385	 * size to 1024 MB or the vram size, whichever is larger.
 386	 */
 387	if (amdgpu_gart_size == -1)
 388		adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
 389	else
 390		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
 391
 392	gmc_v7_0_vram_gtt_location(adev, &adev->mc);
 393
 394	return 0;
 395}
 396
 397/*
 398 * GART
 399 * VMID 0 holds the physical GPU address space as used by the kernel.
 400 * VMIDs 1-15 are used for userspace clients and are handled
 401 * by the amdgpu vm/hsa code.
 402 */
 403
 404/**
 405 * gmc_v7_0_gart_flush_gpu_tlb - gart tlb flush callback
 406 *
 407 * @adev: amdgpu_device pointer
 408 * @vmid: vm instance to flush
 409 *
 410 * Flush the TLB for the requested page table (CIK).
 411 */
 412static void gmc_v7_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
 413					uint32_t vmid)
 414{
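    	/*
    	 * The HDP flush below should push any CPU-written page table
    	 * entries out to memory first, so that the TLB refill triggered
    	 * by the invalidate observes up-to-date PTEs.
    	 */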
 415	/* flush hdp cache */
 416	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
 417
 418	/* bits 0-15 are the VM contexts0-15 */
 419	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 420}
 421
 422/**
 423 * gmc_v7_0_gart_set_pte_pde - update the page tables using MMIO
 424 *
 425 * @adev: amdgpu_device pointer
 426 * @cpu_pt_addr: cpu address of the page table
 427 * @gpu_page_idx: entry in the page table to update
 428 * @addr: dst addr to write into pte/pde
 429 * @flags: access flags
 430 *
 431 * Update the page tables using the CPU.
 432 */
 433static int gmc_v7_0_gart_set_pte_pde(struct amdgpu_device *adev,
 434				     void *cpu_pt_addr,
 435				     uint32_t gpu_page_idx,
 436				     uint64_t addr,
 437				     uint32_t flags)
 438{
 439	void __iomem *ptr = (void *)cpu_pt_addr;
 440	uint64_t value;
 441
 442	value = addr & 0xFFFFFFFFFFFFF000ULL;
 443	value |= flags;
 444	writeq(value, ptr + (gpu_page_idx * 8));
 445
 446	return 0;
 447}
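    /*
     * Worked example (illustrative values): mapping a 4 KiB page at
     * 0x1234567000 with AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE |
     * AMDGPU_PTE_WRITEABLE stores that address OR'ed with the flag bits,
     * since bits 11:0 of a GART PTE carry the access flags and the upper
     * bits the page-aligned address.
     */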
 448
 449/**
 450 * gmc_v7_0_set_fault_enable_default - update VM fault handling
 451 *
 452 * @adev: amdgpu_device pointer
 453 * @value: true redirects VM faults to the default page
 454 */
 455static void gmc_v7_0_set_fault_enable_default(struct amdgpu_device *adev,
 456					      bool value)
 457{
 458	u32 tmp;
 459
 460	tmp = RREG32(mmVM_CONTEXT1_CNTL);
 461	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 462			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 463	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 464			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 465	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 466			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 467	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 468			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 469	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 470			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 471	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 472			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 473	WREG32(mmVM_CONTEXT1_CNTL, tmp);
 474}
 475
 476/**
 477 * gmc_v7_0_gart_enable - gart enable
 478 *
 479 * @adev: amdgpu_device pointer
 480 *
 481 * This sets up the TLBs, programs the page tables for VMID0,
 482 * sets up the hw for VMIDs 1-15 which are allocated on
 483 * demand, and sets up the global locations for the LDS, GDS,
 484 * and GPUVM for FSA64 clients (CIK).
 485 * Returns 0 for success, errors for failure.
 486 */
 487static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
 488{
 489	int r, i;
 490	u32 tmp;
 491
 492	if (adev->gart.robj == NULL) {
 493		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
 494		return -EINVAL;
 495	}
 496	r = amdgpu_gart_table_vram_pin(adev);
 497	if (r)
 498		return r;
 499	/* Setup TLB control */
 500	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
 501	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
 502	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
 503	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
 504	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
 505	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
 506	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
 507	/* Setup L2 cache */
 508	tmp = RREG32(mmVM_L2_CNTL);
 509	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
 510	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
 511	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
 512	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
 513	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
 514	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
 515	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
 516	WREG32(mmVM_L2_CNTL, tmp);
 517	tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
 518	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
 519	WREG32(mmVM_L2_CNTL2, tmp);
 520	tmp = RREG32(mmVM_L2_CNTL3);
 521	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
 522	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
 523	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
 524	WREG32(mmVM_L2_CNTL3, tmp);
 525	/* setup context0 */
 526	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
 527	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
 528	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
 529	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 530			(u32)(adev->dummy_page.addr >> 12));
 531	WREG32(mmVM_CONTEXT0_CNTL2, 0);
 532	tmp = RREG32(mmVM_CONTEXT0_CNTL);
 533	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
 534	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
 535	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 536	WREG32(mmVM_CONTEXT0_CNTL, tmp);
 537
 538	WREG32(0x575, 0);
 539	WREG32(0x576, 0);
 540	WREG32(0x577, 0);
 541
 542	/* empty context1-15 */
 543	/* FIXME start with 4G, once using 2 level pt switch to full
 544	 * vm size space
 545	 */
 546	/* set vm size, must be a multiple of 4 */
 547	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
 548	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
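    	/*
    	 * The per-context page table base registers sit in two banks:
    	 * contexts 0-7 follow VM_CONTEXT0_PAGE_TABLE_BASE_ADDR and
    	 * contexts 8-15 follow VM_CONTEXT8_PAGE_TABLE_BASE_ADDR, hence
    	 * the split in the loop below.
    	 */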
 549	for (i = 1; i < 16; i++) {
 550		if (i < 8)
 551			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
 552			       adev->gart.table_addr >> 12);
 553		else
 554			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
 555			       adev->gart.table_addr >> 12);
 556	}
 557
 558	/* enable context1-15 */
 559	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
 560	       (u32)(adev->dummy_page.addr >> 12));
 561	WREG32(mmVM_CONTEXT1_CNTL2, 4);
 562	tmp = RREG32(mmVM_CONTEXT1_CNTL);
 563	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
 564	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
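    	/*
    	 * The block size field appears to be biased by 9: a value of 0
    	 * selects 2^9 (512) pages per page table block, so the log2
    	 * module parameter is reduced by 9 before being programmed.
    	 */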
 565	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
 566			    amdgpu_vm_block_size - 9);
 567	WREG32(mmVM_CONTEXT1_CNTL, tmp);
 568	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
 569		gmc_v7_0_set_fault_enable_default(adev, false);
 570	else
 571		gmc_v7_0_set_fault_enable_default(adev, true);
 572
 573	if (adev->asic_type == CHIP_KAVERI) {
 574		tmp = RREG32(mmCHUB_CONTROL);
 575		tmp &= ~BYPASS_VM;
 576		WREG32(mmCHUB_CONTROL, tmp);
 577	}
 578
 579	gmc_v7_0_gart_flush_gpu_tlb(adev, 0);
 580	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
 581		 (unsigned)(adev->mc.gtt_size >> 20),
 582		 (unsigned long long)adev->gart.table_addr);
 583	adev->gart.ready = true;
 584	return 0;
 585}
 586
 587static int gmc_v7_0_gart_init(struct amdgpu_device *adev)
 588{
 589	int r;
 590
 591	if (adev->gart.robj) {
 592		WARN(1, "PCIE GART already initialized\n");
 593		return 0;
 594	}
 595	/* Initialize common gart structure */
 596	r = amdgpu_gart_init(adev);
 597	if (r)
 598		return r;
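    	/* each GART entry is one 64-bit PTE */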
 599	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
 600	return amdgpu_gart_table_vram_alloc(adev);
 601}
 602
 603/**
 604 * gmc_v7_0_gart_disable - gart disable
 605 *
 606 * @adev: amdgpu_device pointer
 607 *
 608 * This disables all VM page tables (CIK).
 609 */
 610static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
 611{
 612	u32 tmp;
 613
 614	/* Disable all tables */
 615	WREG32(mmVM_CONTEXT0_CNTL, 0);
 616	WREG32(mmVM_CONTEXT1_CNTL, 0);
 617	/* Setup TLB control */
 618	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
 619	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
 620	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
 621	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
 622	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
 623	/* Setup L2 cache */
 624	tmp = RREG32(mmVM_L2_CNTL);
 625	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
 626	WREG32(mmVM_L2_CNTL, tmp);
 627	WREG32(mmVM_L2_CNTL2, 0);
 628	amdgpu_gart_table_vram_unpin(adev);
 629}
 630
 631/**
 632 * gmc_v7_0_gart_fini - vm fini callback
 633 *
 634 * @adev: amdgpu_device pointer
 635 *
 636 * Tears down the driver GART/VM setup (CIK).
 637 */
 638static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
 639{
 640	amdgpu_gart_table_vram_free(adev);
 641	amdgpu_gart_fini(adev);
 642}
 643
 644/*
 645 * vm
 646 * VMID 0 holds the physical GPU address space as used by the kernel.
 647 * VMIDs 1-15 are used for userspace clients and are handled
 648 * by the amdgpu vm/hsa code.
 649 */
 650/**
 651 * gmc_v7_0_vm_init - cik vm init callback
 652 *
 653 * @adev: amdgpu_device pointer
 654 *
 655 * Inits CIK-specific VM parameters (number of VMs, base of vram for
 656 * VMIDs 1-15) (CIK).
 657 * Returns 0 for success.
 658 */
 659static int gmc_v7_0_vm_init(struct amdgpu_device *adev)
 660{
 661	/*
 662	 * number of VMs
 663	 * VMID 0 is reserved for System
 664	 * amdgpu graphics/compute will use VMIDs 1-7
 665	 * amdkfd will use VMIDs 8-15
 666	 */
 667	adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
 668	amdgpu_vm_manager_init(adev);
 669
 670	/* base offset of vram pages */
 671	if (adev->flags & AMD_IS_APU) {
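    		/*
    		 * MC_VM_FB_OFFSET evidently reports the carve-out position
    		 * in 4 MB (1 << 22) units, hence the shift to get a byte
    		 * address.
    		 */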
 672		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
 673		tmp <<= 22;
 674		adev->vm_manager.vram_base_offset = tmp;
 675	} else {
 676		adev->vm_manager.vram_base_offset = 0;
    	}
 677
 678	return 0;
 679}
 680
 681/**
 682 * gmc_v7_0_vm_fini - cik vm fini callback
 683 *
 684 * @adev: amdgpu_device pointer
 685 *
 686 * Tear down any asic specific VM setup (CIK).
 687 */
 688static void gmc_v7_0_vm_fini(struct amdgpu_device *adev)
 689{
 690}
 691
 692/**
 693 * gmc_v7_0_vm_decode_fault - print human readable fault info
 694 *
 695 * @adev: amdgpu_device pointer
 696 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 697 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
     * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 698 *
 699 * Print human readable fault information (CIK).
 700 */
 701static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev,
 702				     u32 status, u32 addr, u32 mc_client)
 703{
 704	u32 mc_id;
 705	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
 706	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 707					PROTECTIONS);
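    	/* mc_client packs a four-character ASCII client tag, big end
    	 * first; unpack it into a printable C string */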
 708	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
 709		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
 710
 711	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 712			      MEMORY_CLIENT_ID);
 713
 714	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
 715	       protections, vmid, addr,
 716	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 717			     MEMORY_CLIENT_RW) ?
 718	       "write" : "read", block, mc_client, mc_id);
 719}
 720
 721
 722static const u32 mc_cg_registers[] = {
 723	mmMC_HUB_MISC_HUB_CG,
 724	mmMC_HUB_MISC_SIP_CG,
 725	mmMC_HUB_MISC_VM_CG,
 726	mmMC_XPB_CLK_GAT,
 727	mmATC_MISC_CG,
 728	mmMC_CITF_MISC_WR_CG,
 729	mmMC_CITF_MISC_RD_CG,
 730	mmMC_CITF_MISC_VM_CG,
 731	mmVM_L2_CG,
 732};
 733
 734static const u32 mc_cg_ls_en[] = {
 735	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
 736	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
 737	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
 738	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
 739	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
 740	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
 741	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
 742	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
 743	VM_L2_CG__MEM_LS_ENABLE_MASK,
 744};
 745
 746static const u32 mc_cg_en[] = {
 747	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
 748	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
 749	MC_HUB_MISC_VM_CG__ENABLE_MASK,
 750	MC_XPB_CLK_GAT__ENABLE_MASK,
 751	ATC_MISC_CG__ENABLE_MASK,
 752	MC_CITF_MISC_WR_CG__ENABLE_MASK,
 753	MC_CITF_MISC_RD_CG__ENABLE_MASK,
 754	MC_CITF_MISC_VM_CG__ENABLE_MASK,
 755	VM_L2_CG__ENABLE_MASK,
 756};
 757
 758static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev,
 759				  bool enable)
 760{
 761	int i;
 762	u32 orig, data;
 763
 764	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
 765		orig = data = RREG32(mc_cg_registers[i]);
 766		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
 767			data |= mc_cg_ls_en[i];
 768		else
 769			data &= ~mc_cg_ls_en[i];
 770		if (data != orig)
 771			WREG32(mc_cg_registers[i], data);
 772	}
 773}
 774
 775static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev,
 776				    bool enable)
 777{
 778	int i;
 779	u32 orig, data;
 780
 781	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
 782		orig = data = RREG32(mc_cg_registers[i]);
 783		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
 784			data |= mc_cg_en[i];
 785		else
 786			data &= ~mc_cg_en[i];
 787		if (data != orig)
 788			WREG32(mc_cg_registers[i], data);
 789	}
 790}
 791
 792static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev,
 793				     bool enable)
 794{
 795	u32 orig, data;
 796
 797	orig = data = RREG32_PCIE(ixPCIE_CNTL2);
 798
 799	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
 800		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
 801		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
 802		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
 803		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
 804	} else {
 805		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
 806		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
 807		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
 808		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
 809	}
 810
 811	if (orig != data)
 812		WREG32_PCIE(ixPCIE_CNTL2, data);
 813}
 814
 815static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev,
 816				     bool enable)
 817{
 818	u32 orig, data;
 819
 820	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
 821
 822	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
 823		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
 824	else
 825		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
 826
 827	if (orig != data)
 828		WREG32(mmHDP_HOST_PATH_CNTL, data);
 829}
 830
 831static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev,
 832				   bool enable)
 833{
 834	u32 orig, data;
 835
 836	orig = data = RREG32(mmHDP_MEM_POWER_LS);
 837
 838	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
 839		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
 840	else
 841		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
 842
 843	if (orig != data)
 844		WREG32(mmHDP_MEM_POWER_LS, data);
 845}
 846
 847static int gmc_v7_0_convert_vram_type(int mc_seq_vram_type)
 848{
 849	switch (mc_seq_vram_type) {
 850	case MC_SEQ_MISC0__MT__GDDR1:
 851		return AMDGPU_VRAM_TYPE_GDDR1;
 852	case MC_SEQ_MISC0__MT__DDR2:
 853		return AMDGPU_VRAM_TYPE_DDR2;
 854	case MC_SEQ_MISC0__MT__GDDR3:
 855		return AMDGPU_VRAM_TYPE_GDDR3;
 856	case MC_SEQ_MISC0__MT__GDDR4:
 857		return AMDGPU_VRAM_TYPE_GDDR4;
 858	case MC_SEQ_MISC0__MT__GDDR5:
 859		return AMDGPU_VRAM_TYPE_GDDR5;
 860	case MC_SEQ_MISC0__MT__HBM:
 861		return AMDGPU_VRAM_TYPE_HBM;
 862	case MC_SEQ_MISC0__MT__DDR3:
 863		return AMDGPU_VRAM_TYPE_DDR3;
 864	default:
 865		return AMDGPU_VRAM_TYPE_UNKNOWN;
 866	}
 867}
 868
 869static int gmc_v7_0_early_init(void *handle)
 870{
 871	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 872
 873	gmc_v7_0_set_gart_funcs(adev);
 874	gmc_v7_0_set_irq_funcs(adev);
 875
 876	return 0;
 877}
 878
 879static int gmc_v7_0_late_init(void *handle)
 880{
 881	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 882
 883	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
 884		return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
 885	else
 886		return 0;
 887}
 888
 889static int gmc_v7_0_sw_init(void *handle)
 890{
 891	int r;
 892	int dma_bits;
 893	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 894
 895	if (adev->flags & AMD_IS_APU) {
 896		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
 897	} else {
 898		u32 tmp = RREG32(mmMC_SEQ_MISC0);
 899		tmp &= MC_SEQ_MISC0__MT__MASK;
 900		adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
 901	}
 902
 903	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
 904	if (r)
 905		return r;
 906
 907	r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
 908	if (r)
 909		return r;
 910
 911	/* Adjust VM size here.
 912	 * Currently set to 4GB ((1 << 20) 4k pages).
 913	 * Max GPUVM size for CIK is 40 bits.
 914	 */
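    	/*
    	 * amdgpu_vm_size is in GB and a GB holds 1 << 18 4k pages
    	 * (2^30 / 2^12), so the shift yields a page frame count, e.g.
    	 * amdgpu_vm_size = 4 gives max_pfn = 1 << 20, the 4GB above.
    	 */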
 915	adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
 916
 917	/* Set the internal MC address mask
 918	 * This is the max address of the GPU's
 919	 * internal address space.
 920	 */
 921	adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
 922
 923	/* set DMA mask + need_dma32 flags.
 924	 * PCIE - can handle 40-bits.
 925	 * IGP - can handle 40-bits
 926	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
 927	 */
 928	adev->need_dma32 = false;
 929	dma_bits = adev->need_dma32 ? 32 : 40;
 930	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
 931	if (r) {
 932		adev->need_dma32 = true;
 933		dma_bits = 32;
 934		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
 935	}
 936	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
 937	if (r) {
 938		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
 939		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
 940	}
 941
 942	r = gmc_v7_0_init_microcode(adev);
 943	if (r) {
 944		DRM_ERROR("Failed to load mc firmware!\n");
 945		return r;
 946	}
 947
 948	r = gmc_v7_0_mc_init(adev);
 949	if (r)
 950		return r;
 951
 952	/* Memory manager */
 953	r = amdgpu_bo_init(adev);
 954	if (r)
 955		return r;
 956
 957	r = gmc_v7_0_gart_init(adev);
 958	if (r)
 959		return r;
 960
 961	if (!adev->vm_manager.enabled) {
 962		r = gmc_v7_0_vm_init(adev);
 963		if (r) {
 964			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
 965			return r;
 966		}
 967		adev->vm_manager.enabled = true;
 968	}
 969
 970	return r;
 971}
 972
 973static int gmc_v7_0_sw_fini(void *handle)
 974{
 975	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 976
 977	if (adev->vm_manager.enabled) {
 978		amdgpu_vm_manager_fini(adev);
 979		gmc_v7_0_vm_fini(adev);
 980		adev->vm_manager.enabled = false;
 981	}
 982	gmc_v7_0_gart_fini(adev);
 983	amdgpu_gem_force_release(adev);
 984	amdgpu_bo_fini(adev);
 985
 986	return 0;
 987}
 988
 989static int gmc_v7_0_hw_init(void *handle)
 990{
 991	int r;
 992	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 993
 994	gmc_v7_0_init_golden_registers(adev);
 995
 996	gmc_v7_0_mc_program(adev);
 997
 998	if (!(adev->flags & AMD_IS_APU)) {
 999		r = gmc_v7_0_mc_load_microcode(adev);
1000		if (r) {
1001			DRM_ERROR("Failed to load MC firmware!\n");
1002			return r;
1003		}
1004	}
1005
1006	r = gmc_v7_0_gart_enable(adev);
1007	if (r)
1008		return r;
1009
1010	return r;
1011}
1012
1013static int gmc_v7_0_hw_fini(void *handle)
1014{
1015	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1016
1017	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
1018	gmc_v7_0_gart_disable(adev);
1019
1020	return 0;
1021}
1022
1023static int gmc_v7_0_suspend(void *handle)
1024{
1025	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1026
1027	if (adev->vm_manager.enabled) {
1028		gmc_v7_0_vm_fini(adev);
1029		adev->vm_manager.enabled = false;
1030	}
1031	gmc_v7_0_hw_fini(adev);
1032
1033	return 0;
1034}
1035
1036static int gmc_v7_0_resume(void *handle)
1037{
1038	int r;
1039	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1040
1041	r = gmc_v7_0_hw_init(adev);
1042	if (r)
1043		return r;
1044
1045	if (!adev->vm_manager.enabled) {
1046		r = gmc_v7_0_vm_init(adev);
1047		if (r) {
1048			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
1049			return r;
1050		}
1051		adev->vm_manager.enabled = true;
1052	}
1053
1054	return r;
1055}
1056
1057static bool gmc_v7_0_is_idle(void *handle)
1058{
1059	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1060	u32 tmp = RREG32(mmSRBM_STATUS);
1061
1062	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1063		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
1064		return false;
1065
1066	return true;
1067}
1068
1069static int gmc_v7_0_wait_for_idle(void *handle)
1070{
1071	unsigned i;
1072	u32 tmp;
1073	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1074
1075	for (i = 0; i < adev->usec_timeout; i++) {
1076		/* read MC_STATUS */
1077		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
1078					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1079					       SRBM_STATUS__MCC_BUSY_MASK |
1080					       SRBM_STATUS__MCD_BUSY_MASK |
1081					       SRBM_STATUS__VMC_BUSY_MASK);
1082		if (!tmp)
1083			return 0;
1084		udelay(1);
1085	}
1086	return -ETIMEDOUT;
1087
1088}
1089
1090static int gmc_v7_0_soft_reset(void *handle)
1091{
1092	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1093	struct amdgpu_mode_mc_save save;
1094	u32 srbm_soft_reset = 0;
1095	u32 tmp = RREG32(mmSRBM_STATUS);
1096
1097	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
1098		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1099						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
1100
1101	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1102		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
1103		if (!(adev->flags & AMD_IS_APU))
1104			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1105							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
1106	}
1107
1108	if (srbm_soft_reset) {
1109		gmc_v7_0_mc_stop(adev, &save);
1110		if (gmc_v7_0_wait_for_idle((void *)adev)) {
1111			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
1112		}
1113
1114
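    		/*
    		 * Usual SRBM reset sequence: assert the reset bits, read
    		 * the register back to post the write, hold reset for
    		 * ~50 us, then deassert and let things settle.
    		 */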
1115		tmp = RREG32(mmSRBM_SOFT_RESET);
1116		tmp |= srbm_soft_reset;
1117		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1118		WREG32(mmSRBM_SOFT_RESET, tmp);
1119		tmp = RREG32(mmSRBM_SOFT_RESET);
1120
1121		udelay(50);
1122
1123		tmp &= ~srbm_soft_reset;
1124		WREG32(mmSRBM_SOFT_RESET, tmp);
1125		tmp = RREG32(mmSRBM_SOFT_RESET);
1126
1127		/* Wait a little for things to settle down */
1128		udelay(50);
1129
1130		gmc_v7_0_mc_resume(adev, &save);
1131		udelay(50);
1132	}
1133
1134	return 0;
1135}
1136
1137static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
1138					     struct amdgpu_irq_src *src,
1139					     unsigned type,
1140					     enum amdgpu_interrupt_state state)
1141{
1142	u32 tmp;
1143	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1144		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1145		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1146		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1147		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1148		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
1149
1150	switch (state) {
1151	case AMDGPU_IRQ_STATE_DISABLE:
1152		/* system context */
1153		tmp = RREG32(mmVM_CONTEXT0_CNTL);
1154		tmp &= ~bits;
1155		WREG32(mmVM_CONTEXT0_CNTL, tmp);
1156		/* VMs */
1157		tmp = RREG32(mmVM_CONTEXT1_CNTL);
1158		tmp &= ~bits;
1159		WREG32(mmVM_CONTEXT1_CNTL, tmp);
1160		break;
1161	case AMDGPU_IRQ_STATE_ENABLE:
1162		/* system context */
1163		tmp = RREG32(mmVM_CONTEXT0_CNTL);
1164		tmp |= bits;
1165		WREG32(mmVM_CONTEXT0_CNTL, tmp);
1166		/* VMs */
1167		tmp = RREG32(mmVM_CONTEXT1_CNTL);
1168		tmp |= bits;
1169		WREG32(mmVM_CONTEXT1_CNTL, tmp);
1170		break;
1171	default:
1172		break;
1173	}
1174
1175	return 0;
1176}
1177
1178static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
1179				      struct amdgpu_irq_src *source,
1180				      struct amdgpu_iv_entry *entry)
1181{
1182	u32 addr, status, mc_client;
1183
1184	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
1185	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
1186	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
1187	/* reset addr and status */
1188	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
1189
1190	if (!addr && !status)
1191		return 0;
1192
1193	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
1194		gmc_v7_0_set_fault_enable_default(adev, false);
1195
1196	if (printk_ratelimit()) {
1197		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
1198			entry->src_id, entry->src_data);
1199		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
1200			addr);
1201		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1202			status);
1203		gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);
1204	}
1205
1206	return 0;
1207}
1208
1209static int gmc_v7_0_set_clockgating_state(void *handle,
1210					  enum amd_clockgating_state state)
1211{
1212	bool gate = false;
1213	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1214
1215	if (state == AMD_CG_STATE_GATE)
1216		gate = true;
1217
1218	if (!(adev->flags & AMD_IS_APU)) {
1219		gmc_v7_0_enable_mc_mgcg(adev, gate);
1220		gmc_v7_0_enable_mc_ls(adev, gate);
1221	}
1222	gmc_v7_0_enable_bif_mgls(adev, gate);
1223	gmc_v7_0_enable_hdp_mgcg(adev, gate);
1224	gmc_v7_0_enable_hdp_ls(adev, gate);
1225
1226	return 0;
1227}
1228
1229static int gmc_v7_0_set_powergating_state(void *handle,
1230					  enum amd_powergating_state state)
1231{
1232	return 0;
1233}
1234
1235static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
1236	.name = "gmc_v7_0",
1237	.early_init = gmc_v7_0_early_init,
1238	.late_init = gmc_v7_0_late_init,
1239	.sw_init = gmc_v7_0_sw_init,
1240	.sw_fini = gmc_v7_0_sw_fini,
1241	.hw_init = gmc_v7_0_hw_init,
1242	.hw_fini = gmc_v7_0_hw_fini,
1243	.suspend = gmc_v7_0_suspend,
1244	.resume = gmc_v7_0_resume,
1245	.is_idle = gmc_v7_0_is_idle,
1246	.wait_for_idle = gmc_v7_0_wait_for_idle,
1247	.soft_reset = gmc_v7_0_soft_reset,
1248	.set_clockgating_state = gmc_v7_0_set_clockgating_state,
1249	.set_powergating_state = gmc_v7_0_set_powergating_state,
1250};
1251
1252static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = {
1253	.flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb,
1254	.set_pte_pde = gmc_v7_0_gart_set_pte_pde,
1255};
1256
1257static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
1258	.set = gmc_v7_0_vm_fault_interrupt_state,
1259	.process = gmc_v7_0_process_interrupt,
1260};
1261
1262static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev)
1263{
1264	if (adev->gart.gart_funcs == NULL)
1265		adev->gart.gart_funcs = &gmc_v7_0_gart_funcs;
1266}
1267
1268static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
1269{
1270	adev->mc.vm_fault.num_types = 1;
1271	adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
1272}
1273
1274const struct amdgpu_ip_block_version gmc_v7_0_ip_block =
1275{
1276	.type = AMD_IP_BLOCK_TYPE_GMC,
1277	.major = 7,
1278	.minor = 0,
1279	.rev = 0,
1280	.funcs = &gmc_v7_0_ip_funcs,
1281};
1282
1283const struct amdgpu_ip_block_version gmc_v7_4_ip_block =
1284{
1285	.type = AMD_IP_BLOCK_TYPE_GMC,
1286	.major = 7,
1287	.minor = 4,
1288	.rev = 0,
1289	.funcs = &gmc_v7_0_ip_funcs,
1290};