Linux v6.13.7 - drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
   1/*
   2 * Copyright 2014 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23
  24#include <linux/firmware.h>
  25#include <linux/module.h>
  26#include <linux/pci.h>
  27
  28#include <drm/drm_cache.h>
  29#include "amdgpu.h"
  30#include "gmc_v6_0.h"
  31#include "amdgpu_ucode.h"
  32#include "amdgpu_gem.h"
  33
  34#include "bif/bif_3_0_d.h"
  35#include "bif/bif_3_0_sh_mask.h"
  36#include "oss/oss_1_0_d.h"
  37#include "oss/oss_1_0_sh_mask.h"
  38#include "gmc/gmc_6_0_d.h"
  39#include "gmc/gmc_6_0_sh_mask.h"
  40#include "dce/dce_6_0_d.h"
  41#include "dce/dce_6_0_sh_mask.h"
  42#include "si_enums.h"
  43
  44static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
  45static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
  46static int gmc_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block);
  47
  48MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
  49MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
  50MODULE_FIRMWARE("amdgpu/verde_mc.bin");
  51MODULE_FIRMWARE("amdgpu/oland_mc.bin");
  52MODULE_FIRMWARE("amdgpu/hainan_mc.bin");
  53MODULE_FIRMWARE("amdgpu/si58_mc.bin");
  54
  55#define MC_SEQ_MISC0__MT__MASK   0xf0000000
  56#define MC_SEQ_MISC0__MT__GDDR1  0x10000000
  57#define MC_SEQ_MISC0__MT__DDR2   0x20000000
  58#define MC_SEQ_MISC0__MT__GDDR3  0x30000000
  59#define MC_SEQ_MISC0__MT__GDDR4  0x40000000
  60#define MC_SEQ_MISC0__MT__GDDR5  0x50000000
  61#define MC_SEQ_MISC0__MT__HBM    0x60000000
  62#define MC_SEQ_MISC0__MT__DDR3   0xB0000000
  63
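/**
 * gmc_v6_0_mc_stop - stop the memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the MC to go idle, block CPU access to the framebuffer
 * through the BIF and put the MC into blackout mode so its state can
 * be changed safely (used around soft reset).
 */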
  64static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)
  65{
  66	u32 blackout;
  67	struct amdgpu_ip_block *ip_block;
  68
  69	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
  70	if (!ip_block)
  71		return;
  72
  73	gmc_v6_0_wait_for_idle(ip_block);
  74
  75	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
  76	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
  77		/* Block CPU access */
  78		WREG32(mmBIF_FB_EN, 0);
  79		/* blackout the MC */
  80		blackout = REG_SET_FIELD(blackout,
  81					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
  82		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
  83	}
  84	/* wait for the MC to settle */
  85	udelay(100);
  86
  87}
  88
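/**
 * gmc_v6_0_mc_resume - resume the memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Take the MC out of blackout mode and re-enable CPU read/write
 * access to the framebuffer through the BIF.
 */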
  89static void gmc_v6_0_mc_resume(struct amdgpu_device *adev)
  90{
  91	u32 tmp;
  92
  93	/* unblackout the MC */
  94	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
  95	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
  96	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
  97	/* allow CPU access */
  98	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
  99	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
 100	WREG32(mmBIF_FB_EN, tmp);
 101}
 102
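/**
 * gmc_v6_0_init_microcode - load the MC firmware image
 *
 * @adev: amdgpu_device pointer
 *
 * Pick the MC firmware matching the SI ASIC (or the special si58
 * image when MC_SEQ_MISC0 reports a 0x58 memory configuration) and
 * request it from the firmware loader. Returns 0 on success.
 */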
 103static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
 104{
 105	const char *chip_name;
 106	int err;
 107
 108	DRM_DEBUG("\n");
 109
 110	switch (adev->asic_type) {
 111	case CHIP_TAHITI:
 112		chip_name = "tahiti";
 113		break;
 114	case CHIP_PITCAIRN:
 115		chip_name = "pitcairn";
 116		break;
 117	case CHIP_VERDE:
 118		chip_name = "verde";
 119		break;
 120	case CHIP_OLAND:
 121		chip_name = "oland";
 122		break;
 123	case CHIP_HAINAN:
 124		chip_name = "hainan";
 125		break;
 126	default:
 127		BUG();
 128	}
 129
 130	/* this memory configuration requires special firmware */
 131	if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
 132		chip_name = "si58";
 133
 134	err = amdgpu_ucode_request(adev, &adev->gmc.fw, "amdgpu/%s_mc.bin", chip_name);
 135	if (err) {
 136		dev_err(adev->dev,
 137		       "si_mc: Failed to load firmware \"%s_mc.bin\"\n",
 138		       chip_name);
 139		amdgpu_ucode_release(&adev->gmc.fw);
 140	}
 141	return err;
 142}
 143
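/**
 * gmc_v6_0_mc_load_microcode - program the MC sequencer
 *
 * @adev: amdgpu_device pointer
 *
 * If the MC sequencer is not already running, reset it, load the IO
 * debug register pairs and the MC ucode from the firmware image,
 * restart it and wait for memory training to complete on both
 * channels. Returns 0 on success, -EINVAL if no firmware was loaded.
 */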
 144static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
 145{
 146	const __le32 *new_fw_data = NULL;
 147	u32 running;
 148	const __le32 *new_io_mc_regs = NULL;
 149	int i, regs_size, ucode_size;
 150	const struct mc_firmware_header_v1_0 *hdr;
 151
 152	if (!adev->gmc.fw)
 153		return -EINVAL;
 154
 155	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
 156
 157	amdgpu_ucode_print_mc_hdr(&hdr->header);
 158
 159	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
 160	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
 161	new_io_mc_regs = (const __le32 *)
 162		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
 163	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 164	new_fw_data = (const __le32 *)
 165		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 166
 167	running = RREG32(mmMC_SEQ_SUP_CNTL) & MC_SEQ_SUP_CNTL__RUN_MASK;
 168
 169	if (running == 0) {
 170
 171		/* reset the engine and set to writable */
 172		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
 173		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
 174
 175		/* load mc io regs */
 176		for (i = 0; i < regs_size; i++) {
 177			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
 178			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
 179		}
 180		/* load the MC ucode */
 181		for (i = 0; i < ucode_size; i++)
 182			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
 183
 184		/* put the engine back into the active state */
 185		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
 186		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
 187		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
 188
 189		/* wait for training to complete */
 190		for (i = 0; i < adev->usec_timeout; i++) {
 191			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D0_MASK)
 192				break;
 193			udelay(1);
 194		}
 195		for (i = 0; i < adev->usec_timeout; i++) {
 196			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D1_MASK)
 197				break;
 198			udelay(1);
 199		}
 200
 201	}
 202
 203	return 0;
 204}
 205
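/**
 * gmc_v6_0_vram_gtt_location - place VRAM and GART in the GPU address space
 *
 * @adev: amdgpu_device pointer
 * @mc: GMC structure describing the memory configuration
 *
 * Derive the VRAM base from MC_VM_FB_LOCATION and let the common GMC
 * helpers position VRAM, the default AGP aperture and the GART.
 */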
 206static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
 207				       struct amdgpu_gmc *mc)
 208{
 209	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
 210
 211	base <<= 24;
 212
 213	amdgpu_gmc_set_agp_default(adev, mc);
 214	amdgpu_gmc_vram_location(adev, mc, base);
 215	amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
 216}
 217
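/**
 * gmc_v6_0_mc_program - program the memory controller apertures
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the HDP registers, disable VGA access to the framebuffer
 * when a display is present, and program the system and AGP apertures
 * to cover the VRAM range.
 */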
 218static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
 219{
 220	int i, j;
 221	struct amdgpu_ip_block *ip_block;
 222
 223
 224	/* Initialize HDP */
 225	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
 226		WREG32((0xb05 + j), 0x00000000);
 227		WREG32((0xb06 + j), 0x00000000);
 228		WREG32((0xb07 + j), 0x00000000);
 229		WREG32((0xb08 + j), 0x00000000);
 230		WREG32((0xb09 + j), 0x00000000);
 231	}
 232	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
 233
 234	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
 235	if (!ip_block)
 236		return;
 237
 238	if (gmc_v6_0_wait_for_idle(ip_block))
 239		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
 240
 241	if (adev->mode_info.num_crtc) {
 242		u32 tmp;
 243
 244		/* Lockout access through VGA aperture*/
 245		tmp = RREG32(mmVGA_HDP_CONTROL);
 246		tmp |= VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK;
 247		WREG32(mmVGA_HDP_CONTROL, tmp);
 248
 249		/* disable VGA render */
 250		tmp = RREG32(mmVGA_RENDER_CONTROL);
 251		tmp &= ~VGA_VSTATUS_CNTL;
 252		WREG32(mmVGA_RENDER_CONTROL, tmp);
 253	}
 254	/* Update configuration */
 255	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
 256	       adev->gmc.vram_start >> 12);
 257	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
 258	       adev->gmc.vram_end >> 12);
 259	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
 260	       adev->mem_scratch.gpu_addr >> 12);
 261	WREG32(mmMC_VM_AGP_BASE, 0);
 262	WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22);
 263	WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22);
 264
 265	if (gmc_v6_0_wait_for_idle(ip_block))
 266		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
 267}
 268
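/**
 * gmc_v6_0_mc_init - read the memory configuration
 *
 * @adev: amdgpu_device pointer
 *
 * Work out the VRAM width from the channel size and count, read the
 * VRAM size from CONFIG_MEMSIZE, resize the FB BAR on dGPUs, choose
 * the GART size and place VRAM and GART in the GPU address space.
 * Returns 0 on success.
 */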
 269static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
 270{
 271
 272	u32 tmp;
 273	int chansize, numchan;
 274	int r;
 275
 276	tmp = RREG32(mmMC_ARB_RAMCFG);
 277	if (tmp & (1 << 11))
 278		chansize = 16;
 279	else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK)
 280		chansize = 64;
 281	else
 282		chansize = 32;
 283
 284	tmp = RREG32(mmMC_SHARED_CHMAP);
 285	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
 286	case 0:
 287	default:
 288		numchan = 1;
 289		break;
 290	case 1:
 291		numchan = 2;
 292		break;
 293	case 2:
 294		numchan = 4;
 295		break;
 296	case 3:
 297		numchan = 8;
 298		break;
 299	case 4:
 300		numchan = 3;
 301		break;
 302	case 5:
 303		numchan = 6;
 304		break;
 305	case 6:
 306		numchan = 10;
 307		break;
 308	case 7:
 309		numchan = 12;
 310		break;
 311	case 8:
 312		numchan = 16;
 313		break;
 314	}
 315	adev->gmc.vram_width = numchan * chansize;
 316	/* size in MB on si */
 317	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 318	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 319
 320	if (!(adev->flags & AMD_IS_APU)) {
 321		r = amdgpu_device_resize_fb_bar(adev);
 322		if (r)
 323			return r;
 324	}
 325	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
 326	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 327	adev->gmc.visible_vram_size = adev->gmc.aper_size;
 328
 329	/* set the gart size */
 330	if (amdgpu_gart_size == -1) {
 331		switch (adev->asic_type) {
 332		case CHIP_HAINAN:    /* no MM engines */
 333		default:
 334			adev->gmc.gart_size = 256ULL << 20;
 335			break;
 336		case CHIP_VERDE:    /* UVD, VCE do not support GPUVM */
 337		case CHIP_TAHITI:   /* UVD, VCE do not support GPUVM */
 338		case CHIP_PITCAIRN: /* UVD, VCE do not support GPUVM */
 339		case CHIP_OLAND:    /* UVD, VCE do not support GPUVM */
 340			adev->gmc.gart_size = 1024ULL << 20;
 341			break;
 342		}
 343	} else {
 344		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
 345	}
 346
 347	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
 348	gmc_v6_0_vram_gtt_location(adev, &adev->gmc);
 349
 350	return 0;
 351}
 352
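/**
 * gmc_v6_0_flush_gpu_tlb - flush the VM TLB via MMIO
 *
 * @adev: amdgpu_device pointer
 * @vmid: VM context to invalidate
 * @vmhub: ignored on SI
 * @flush_type: ignored on SI
 *
 * Request a TLB invalidation for @vmid by writing VM_INVALIDATE_REQUEST.
 */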
 353static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 354					uint32_t vmhub, uint32_t flush_type)
 355{
 356	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 357}
 358
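/**
 * gmc_v6_0_emit_flush_gpu_tlb - flush the VM TLB from a ring
 *
 * @ring: ring used to emit the register writes
 * @vmid: VM context to invalidate
 * @pd_addr: new page directory address for @vmid
 *
 * Emit writes that update the page table base for @vmid and then
 * request a TLB invalidation for that VM context.
 */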
 359static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 360					    unsigned int vmid, uint64_t pd_addr)
 361{
 362	uint32_t reg;
 363
 364	/* write new base address */
 365	if (vmid < 8)
 366		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
 367	else
 368		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8);
 369	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
 370
 371	/* bits 0-15 are the VM contexts0-15 */
 372	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
 373
 374	return pd_addr;
 375}
 376
 377static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
 378				uint64_t *addr, uint64_t *flags)
 379{
 380	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
 381}
 382
 383static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev,
 384				struct amdgpu_bo_va_mapping *mapping,
 385				uint64_t *flags)
 386{
 387	*flags &= ~AMDGPU_PTE_EXECUTABLE;
 388	*flags &= ~AMDGPU_PTE_PRT;
 389}
 390
 391static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
 392					      bool value)
 393{
 394	u32 tmp;
 395
 396	tmp = RREG32(mmVM_CONTEXT1_CNTL);
 397	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 398			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 399	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 400			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 401	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 402			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 403	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 404			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 405	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 406			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 407	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 408			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 409	WREG32(mmVM_CONTEXT1_CNTL, tmp);
 410}
 411
 412 /**
  413  * gmc_v6_0_set_prt() - set PRT VM fault
 414  *
 415  * @adev: amdgpu_device pointer
 416  * @enable: enable/disable VM fault handling for PRT
 417  */
 418static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
 419{
 420	u32 tmp;
 421
 422	if (enable && !adev->gmc.prt_warning) {
 423		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
 424		adev->gmc.prt_warning = true;
 425	}
 426
 427	tmp = RREG32(mmVM_PRT_CNTL);
 428	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 429			    CB_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
 430			    enable);
 431	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 432			    TC_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
 433			    enable);
 434	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 435			    L2_CACHE_STORE_INVALID_ENTRIES,
 436			    enable);
 437	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 438			    L1_TLB_STORE_INVALID_ENTRIES,
 439			    enable);
 440	WREG32(mmVM_PRT_CNTL, tmp);
 441
 442	if (enable) {
 443		uint32_t low = AMDGPU_VA_RESERVED_BOTTOM >>
 444			AMDGPU_GPU_PAGE_SHIFT;
 445		uint32_t high = adev->vm_manager.max_pfn -
 446			(AMDGPU_VA_RESERVED_TOP >> AMDGPU_GPU_PAGE_SHIFT);
 447
 448		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
 449		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
 450		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
 451		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
 452		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
 453		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
 454		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
 455		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
 456	} else {
 457		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
 458		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
 459		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
 460		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
 461		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
 462		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
 463		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
 464		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
 465	}
 466}
 467
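/**
 * gmc_v6_0_gart_enable - set up and enable the GART
 *
 * @adev: amdgpu_device pointer
 *
 * Program the L1 TLB and VM L2 cache controls, point VM context 0 at
 * the GART page table covering the GART range, give contexts 1-15 a
 * valid table base, enable them with fault handling and flush the TLB.
 * Returns 0 on success, -EINVAL if the GART table is missing.
 */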
 468static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
 469{
 470	uint64_t table_addr;
 471	u32 field;
 472	int i;
 473
 474	if (adev->gart.bo == NULL) {
 475		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
 476		return -EINVAL;
 477	}
 478	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
 479
 480	table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
 481
 482	/* Setup TLB control */
 483	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
 484	       (0xA << 7) |
 485	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK |
 486	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_FRAGMENT_PROCESSING_MASK |
 487	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
 488	       MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK |
 489	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
 490	/* Setup L2 cache */
 491	WREG32(mmVM_L2_CNTL,
 492	       VM_L2_CNTL__ENABLE_L2_CACHE_MASK |
 493	       VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK |
 494	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
 495	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
 496	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
 497	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
 498	WREG32(mmVM_L2_CNTL2,
 499	       VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK |
 500	       VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK);
 501
 502	field = adev->vm_manager.fragment_size;
 503	WREG32(mmVM_L2_CNTL3,
 504	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
 505	       (field << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
 506	       (field << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
 507	/* setup context0 */
 508	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
 509	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
 510	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
 511	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 512			(u32)(adev->dummy_page_addr >> 12));
 513	WREG32(mmVM_CONTEXT0_CNTL2, 0);
 514	WREG32(mmVM_CONTEXT0_CNTL,
 515	       VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK |
 516	       (0UL << VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
 517	       VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
 518
 519	WREG32(0x575, 0);
 520	WREG32(0x576, 0);
 521	WREG32(0x577, 0);
 522
 523	/* empty context1-15 */
 524	/* set vm size, must be a multiple of 4 */
 525	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
 526	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
 527	/* Assign the pt base to something valid for now; the pts used for
 528	 * the VMs are determined by the application and setup and assigned
 529	 * on the fly in the vm part of radeon_gart.c
 530	 */
 531	for (i = 1; i < AMDGPU_NUM_VMID; i++) {
 532		if (i < 8)
 533			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
 534			       table_addr >> 12);
 535		else
 536			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
 537			       table_addr >> 12);
 538	}
 539
 540	/* enable context1-15 */
 541	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
 542	       (u32)(adev->dummy_page_addr >> 12));
 543	WREG32(mmVM_CONTEXT1_CNTL2, 4);
 544	WREG32(mmVM_CONTEXT1_CNTL,
 545	       VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
 546	       (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
 547	       ((adev->vm_manager.block_size - 9)
 548	       << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
 549	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
 550		gmc_v6_0_set_fault_enable_default(adev, false);
 551	else
 552		gmc_v6_0_set_fault_enable_default(adev, true);
 553
 554	gmc_v6_0_flush_gpu_tlb(adev, 0, 0, 0);
 555	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
 556		 (unsigned int)(adev->gmc.gart_size >> 20),
 557		 (unsigned long long)table_addr);
 558	return 0;
 559}
 560
 561static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
 562{
 563	int r;
 564
 565	if (adev->gart.bo) {
 566		dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
 567		return 0;
 568	}
 569	r = amdgpu_gart_init(adev);
 570	if (r)
 571		return r;
 572	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
 573	adev->gart.gart_pte_flags = 0;
 574	return amdgpu_gart_table_vram_alloc(adev);
 575}
 576
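/**
 * gmc_v6_0_gart_disable - tear down the GART
 *
 * @adev: amdgpu_device pointer
 *
 * Disable all VM contexts and reprogram the L1 TLB and VM L2 cache
 * controls so the GART is no longer used.
 */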
 577static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
 578{
 579	/*unsigned i;
 580
 581	for (i = 1; i < 16; ++i) {
 582		uint32_t reg;
 583		if (i < 8)
 584			reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i ;
 585		else
 586			reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (i - 8);
 587		adev->vm_manager.saved_table_addr[i] = RREG32(reg);
 588	}*/
 589
 590	/* Disable all tables */
 591	WREG32(mmVM_CONTEXT0_CNTL, 0);
 592	WREG32(mmVM_CONTEXT1_CNTL, 0);
 593	/* Setup TLB control */
 594	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
 595	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
 596	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
 597	/* Setup L2 cache */
 598	WREG32(mmVM_L2_CNTL,
 599	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
 600	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
 601	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
 602	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
 603	WREG32(mmVM_L2_CNTL2, 0);
 604	WREG32(mmVM_L2_CNTL3,
 605	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
 606	       (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
 607}
 608
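/**
 * gmc_v6_0_vm_decode_fault - print a decoded VM protection fault
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS value
 * @addr: faulting page address
 * @mc_client: memory client id packed as four ASCII characters
 *
 * Extract the vmid, protection bits, client id and access direction
 * from @status and log a human readable fault message.
 */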
 609static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
 610				     u32 status, u32 addr, u32 mc_client)
 611{
 612	u32 mc_id;
 613	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
 614	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 615					PROTECTIONS);
 616	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
 617		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
 618
 619	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 620			      MEMORY_CLIENT_ID);
 621
 622	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
 623	       protections, vmid, addr,
 624	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 625			     MEMORY_CLIENT_RW) ?
 626	       "write" : "read", block, mc_client, mc_id);
 627}
 628
 629/*
 630static const u32 mc_cg_registers[] = {
 631	MC_HUB_MISC_HUB_CG,
 632	MC_HUB_MISC_SIP_CG,
 633	MC_HUB_MISC_VM_CG,
 634	MC_XPB_CLK_GAT,
 635	ATC_MISC_CG,
 636	MC_CITF_MISC_WR_CG,
 637	MC_CITF_MISC_RD_CG,
 638	MC_CITF_MISC_VM_CG,
 639	VM_L2_CG,
 640};
 641
 642static const u32 mc_cg_ls_en[] = {
 643	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
 644	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
 645	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
 646	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
 647	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
 648	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
 649	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
 650	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
 651	VM_L2_CG__MEM_LS_ENABLE_MASK,
 652};
 653
 654static const u32 mc_cg_en[] = {
 655	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
 656	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
 657	MC_HUB_MISC_VM_CG__ENABLE_MASK,
 658	MC_XPB_CLK_GAT__ENABLE_MASK,
 659	ATC_MISC_CG__ENABLE_MASK,
 660	MC_CITF_MISC_WR_CG__ENABLE_MASK,
 661	MC_CITF_MISC_RD_CG__ENABLE_MASK,
 662	MC_CITF_MISC_VM_CG__ENABLE_MASK,
 663	VM_L2_CG__ENABLE_MASK,
 664};
 665
 666static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev,
 667				  bool enable)
 668{
 669	int i;
 670	u32 orig, data;
 671
 672	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
 673		orig = data = RREG32(mc_cg_registers[i]);
 674		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
 675			data |= mc_cg_ls_en[i];
 676		else
 677			data &= ~mc_cg_ls_en[i];
 678		if (data != orig)
 679			WREG32(mc_cg_registers[i], data);
 680	}
 681}
 682
 683static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
 684				    bool enable)
 685{
 686	int i;
 687	u32 orig, data;
 688
 689	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
 690		orig = data = RREG32(mc_cg_registers[i]);
 691		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
 692			data |= mc_cg_en[i];
 693		else
 694			data &= ~mc_cg_en[i];
 695		if (data != orig)
 696			WREG32(mc_cg_registers[i], data);
 697	}
 698}
 699
 700static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
 701				     bool enable)
 702{
 703	u32 orig, data;
 704
 705	orig = data = RREG32_PCIE(ixPCIE_CNTL2);
 706
 707	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
 708		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
 709		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
 710		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
 711		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
 712	} else {
 713		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
 714		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
 715		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
 716		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
 717	}
 718
 719	if (orig != data)
 720		WREG32_PCIE(ixPCIE_CNTL2, data);
 721}
 722
 723static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
 724				     bool enable)
 725{
 726	u32 orig, data;
 727
 728	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
 729
 730	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
 731		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
 732	else
 733		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
 734
 735	if (orig != data)
 736		WREG32(mmHDP_HOST_PATH_CNTL, data);
 737}
 738
 739static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
 740				   bool enable)
 741{
 742	u32 orig, data;
 743
 744	orig = data = RREG32(mmHDP_MEM_POWER_LS);
 745
 746	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
 747		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
 748	else
 749		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
 750
 751	if (orig != data)
 752		WREG32(mmHDP_MEM_POWER_LS, data);
 753}
 754*/
 755
 756static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
 757{
 758	switch (mc_seq_vram_type) {
 759	case MC_SEQ_MISC0__MT__GDDR1:
 760		return AMDGPU_VRAM_TYPE_GDDR1;
 761	case MC_SEQ_MISC0__MT__DDR2:
 762		return AMDGPU_VRAM_TYPE_DDR2;
 763	case MC_SEQ_MISC0__MT__GDDR3:
 764		return AMDGPU_VRAM_TYPE_GDDR3;
 765	case MC_SEQ_MISC0__MT__GDDR4:
 766		return AMDGPU_VRAM_TYPE_GDDR4;
 767	case MC_SEQ_MISC0__MT__GDDR5:
 768		return AMDGPU_VRAM_TYPE_GDDR5;
 769	case MC_SEQ_MISC0__MT__DDR3:
 770		return AMDGPU_VRAM_TYPE_DDR3;
 771	default:
 772		return AMDGPU_VRAM_TYPE_UNKNOWN;
 773	}
 774}
 775
 776static int gmc_v6_0_early_init(struct amdgpu_ip_block *ip_block)
 777{
 778	struct amdgpu_device *adev = ip_block->adev;
 779
 780	gmc_v6_0_set_gmc_funcs(adev);
 781	gmc_v6_0_set_irq_funcs(adev);
 782
 783	return 0;
 784}
 785
 786static int gmc_v6_0_late_init(struct amdgpu_ip_block *ip_block)
 787{
 788	struct amdgpu_device *adev = ip_block->adev;
 789
 790	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
 791		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
 792	else
 793		return 0;
 794}
 795
 796static unsigned int gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
 797{
 798	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
 799	unsigned int size;
 800
 801	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
 802		size = AMDGPU_VBIOS_VGA_ALLOCATION;
 803	} else {
 804		u32 viewport = RREG32(mmVIEWPORT_SIZE);
 805
 806		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
 807			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
 808			4);
 809	}
 810	return size;
 811}
 812
 813static int gmc_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
 814{
 815	int r;
 816	struct amdgpu_device *adev = ip_block->adev;
 817
 818	set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
 819
 820	if (adev->flags & AMD_IS_APU) {
 821		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
 822	} else {
 823		u32 tmp = RREG32(mmMC_SEQ_MISC0);
 824
 825		tmp &= MC_SEQ_MISC0__MT__MASK;
 826		adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp);
 827	}
 828
 829	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
 830	if (r)
 831		return r;
 832
 833	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
 834	if (r)
 835		return r;
 836
 837	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);
 838
 839	adev->gmc.mc_mask = 0xffffffffffULL;
 840
 841	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
 842	if (r) {
 843		dev_warn(adev->dev, "No suitable DMA available.\n");
 844		return r;
 845	}
 846	adev->need_swiotlb = drm_need_swiotlb(40);
 847
 848	r = gmc_v6_0_init_microcode(adev);
 849	if (r) {
 850		dev_err(adev->dev, "Failed to load mc firmware!\n");
 851		return r;
 852	}
 853
 854	r = gmc_v6_0_mc_init(adev);
 855	if (r)
 856		return r;
 857
 858	amdgpu_gmc_get_vbios_allocations(adev);
 859
 860	r = amdgpu_bo_init(adev);
 861	if (r)
 862		return r;
 863
 864	r = gmc_v6_0_gart_init(adev);
 865	if (r)
 866		return r;
 867
 868	/*
 869	 * number of VMs
 870	 * VMID 0 is reserved for System
 871	 * amdgpu graphics/compute will use VMIDs 1-7
 872	 * amdkfd will use VMIDs 8-15
 873	 */
 874	adev->vm_manager.first_kfd_vmid = 8;
 875	amdgpu_vm_manager_init(adev);
 876
 877	/* base offset of vram pages */
 878	if (adev->flags & AMD_IS_APU) {
 879		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
 880
 881		tmp <<= 22;
 882		adev->vm_manager.vram_base_offset = tmp;
 883	} else {
 884		adev->vm_manager.vram_base_offset = 0;
 885	}
 886
 887	return 0;
 888}
 889
 890static int gmc_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
 891{
 892	struct amdgpu_device *adev = ip_block->adev;
 893
 894	amdgpu_gem_force_release(adev);
 895	amdgpu_vm_manager_fini(adev);
 896	amdgpu_gart_table_vram_free(adev);
 897	amdgpu_bo_fini(adev);
 898	amdgpu_ucode_release(&adev->gmc.fw);
 899
 900	return 0;
 901}
 902
 903static int gmc_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
 904{
 905	int r;
 906	struct amdgpu_device *adev = ip_block->adev;
 907
 908	gmc_v6_0_mc_program(adev);
 909
 910	if (!(adev->flags & AMD_IS_APU)) {
 911		r = gmc_v6_0_mc_load_microcode(adev);
 912		if (r) {
 913			dev_err(adev->dev, "Failed to load MC firmware!\n");
 914			return r;
 915		}
 916	}
 917
 918	r = gmc_v6_0_gart_enable(adev);
 919	if (r)
 920		return r;
 921
 922	if (amdgpu_emu_mode == 1)
 923		return amdgpu_gmc_vram_checking(adev);
 924
 925	return 0;
 926}
 927
 928static int gmc_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
 929{
 930	struct amdgpu_device *adev = ip_block->adev;
 931
 932	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
 933	gmc_v6_0_gart_disable(adev);
 934
 935	return 0;
 936}
 937
 938static int gmc_v6_0_suspend(struct amdgpu_ip_block *ip_block)
 939{
 940	gmc_v6_0_hw_fini(ip_block);
 941
 942	return 0;
 943}
 944
 945static int gmc_v6_0_resume(struct amdgpu_ip_block *ip_block)
 946{
 947	int r;
 948	struct amdgpu_device *adev = ip_block->adev;
 949
 950	r = gmc_v6_0_hw_init(ip_block);
 951	if (r)
 952		return r;
 953
 954	amdgpu_vmid_reset_all(adev);
 955
 956	return 0;
 957}
 958
 959static bool gmc_v6_0_is_idle(void *handle)
 960{
 961	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 962
 963	u32 tmp = RREG32(mmSRBM_STATUS);
 964
 965	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
 966		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
 967		return false;
 968
 969	return true;
 970}
 971
 972static int gmc_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 973{
 974	unsigned int i;
 975	struct amdgpu_device *adev = ip_block->adev;
 976
 977	for (i = 0; i < adev->usec_timeout; i++) {
 978		if (gmc_v6_0_is_idle(adev))
 979			return 0;
 980		udelay(1);
 981	}
 982	return -ETIMEDOUT;
 983
 984}
 985
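/**
 * gmc_v6_0_soft_reset - soft reset the memory controller
 *
 * @ip_block: pointer to the GMC IP block
 *
 * If SRBM_STATUS reports the VMC or MC as busy, stop the MC, pulse
 * the corresponding SRBM soft reset bits and resume the MC.
 * Returns 0.
 */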
 986static int gmc_v6_0_soft_reset(struct amdgpu_ip_block *ip_block)
 987{
 988	struct amdgpu_device *adev = ip_block->adev;
 989
 990	u32 srbm_soft_reset = 0;
 991	u32 tmp = RREG32(mmSRBM_STATUS);
 992
 993	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
 994		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
 995						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
 996
 997	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
 998		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
 999		if (!(adev->flags & AMD_IS_APU))
1000			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1001							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
1002	}
1003
1004	if (srbm_soft_reset) {
1005		gmc_v6_0_mc_stop(adev);
1006
1007		if (gmc_v6_0_wait_for_idle(ip_block))
1008			dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
1009
1010		tmp = RREG32(mmSRBM_SOFT_RESET);
1011		tmp |= srbm_soft_reset;
1012		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1013		WREG32(mmSRBM_SOFT_RESET, tmp);
1014		tmp = RREG32(mmSRBM_SOFT_RESET);
1015
1016		udelay(50);
1017
1018		tmp &= ~srbm_soft_reset;
1019		WREG32(mmSRBM_SOFT_RESET, tmp);
1020		tmp = RREG32(mmSRBM_SOFT_RESET);
1021
1022		udelay(50);
1023
1024		gmc_v6_0_mc_resume(adev);
1025		udelay(50);
1026	}
1027
1028	return 0;
1029}
1030
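/**
 * gmc_v6_0_vm_fault_interrupt_state - enable/disable VM fault interrupts
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source
 * @type: interrupt type (unused here)
 * @state: requested interrupt state
 *
 * Set or clear the protection fault interrupt enables in
 * VM_CONTEXT0_CNTL and VM_CONTEXT1_CNTL. Returns 0.
 */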
1031static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
1032					     struct amdgpu_irq_src *src,
1033					     unsigned int type,
1034					     enum amdgpu_interrupt_state state)
1035{
1036	u32 tmp;
1037	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1038		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1039		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1040		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1041		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1042		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
1043
1044	switch (state) {
1045	case AMDGPU_IRQ_STATE_DISABLE:
1046		tmp = RREG32(mmVM_CONTEXT0_CNTL);
1047		tmp &= ~bits;
1048		WREG32(mmVM_CONTEXT0_CNTL, tmp);
1049		tmp = RREG32(mmVM_CONTEXT1_CNTL);
1050		tmp &= ~bits;
1051		WREG32(mmVM_CONTEXT1_CNTL, tmp);
1052		break;
1053	case AMDGPU_IRQ_STATE_ENABLE:
1054		tmp = RREG32(mmVM_CONTEXT0_CNTL);
1055		tmp |= bits;
1056		WREG32(mmVM_CONTEXT0_CNTL, tmp);
1057		tmp = RREG32(mmVM_CONTEXT1_CNTL);
1058		tmp |= bits;
1059		WREG32(mmVM_CONTEXT1_CNTL, tmp);
1060		break;
1061	default:
1062		break;
1063	}
1064
1065	return 0;
1066}
1067
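/**
 * gmc_v6_0_process_interrupt - handle a VM protection fault interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: decoded interrupt vector entry
 *
 * Read and clear the fault address and status registers, optionally
 * stop further faults when AMDGPU_VM_FAULT_STOP_FIRST is set and log
 * the decoded fault (rate limited). Returns 0.
 */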
1068static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
1069				      struct amdgpu_irq_src *source,
1070				      struct amdgpu_iv_entry *entry)
1071{
1072	u32 addr, status;
1073
1074	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
1075	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
1076	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
1077
1078	if (!addr && !status)
1079		return 0;
1080
1081	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
1082		gmc_v6_0_set_fault_enable_default(adev, false);
1083
1084	if (printk_ratelimit()) {
1085		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
1086			entry->src_id, entry->src_data[0]);
1087		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
1088			addr);
1089		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1090			status);
1091		gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
1092	}
1093
1094	return 0;
1095}
1096
1097static int gmc_v6_0_set_clockgating_state(void *handle,
1098					  enum amd_clockgating_state state)
1099{
1100	return 0;
1101}
1102
1103static int gmc_v6_0_set_powergating_state(void *handle,
1104					  enum amd_powergating_state state)
1105{
1106	return 0;
1107}
1108
1109static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
1110	.name = "gmc_v6_0",
1111	.early_init = gmc_v6_0_early_init,
1112	.late_init = gmc_v6_0_late_init,
1113	.sw_init = gmc_v6_0_sw_init,
1114	.sw_fini = gmc_v6_0_sw_fini,
1115	.hw_init = gmc_v6_0_hw_init,
1116	.hw_fini = gmc_v6_0_hw_fini,
1117	.suspend = gmc_v6_0_suspend,
1118	.resume = gmc_v6_0_resume,
1119	.is_idle = gmc_v6_0_is_idle,
1120	.wait_for_idle = gmc_v6_0_wait_for_idle,
1121	.soft_reset = gmc_v6_0_soft_reset,
1122	.set_clockgating_state = gmc_v6_0_set_clockgating_state,
1123	.set_powergating_state = gmc_v6_0_set_powergating_state,
1124};
1125
1126static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
1127	.flush_gpu_tlb = gmc_v6_0_flush_gpu_tlb,
1128	.emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
1129	.set_prt = gmc_v6_0_set_prt,
1130	.get_vm_pde = gmc_v6_0_get_vm_pde,
1131	.get_vm_pte = gmc_v6_0_get_vm_pte,
1132	.get_vbios_fb_size = gmc_v6_0_get_vbios_fb_size,
1133};
1134
1135static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
1136	.set = gmc_v6_0_vm_fault_interrupt_state,
1137	.process = gmc_v6_0_process_interrupt,
1138};
1139
1140static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev)
1141{
1142	adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
1143}
1144
1145static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
1146{
1147	adev->gmc.vm_fault.num_types = 1;
1148	adev->gmc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
1149}
1150
1151const struct amdgpu_ip_block_version gmc_v6_0_ip_block = {
1152	.type = AMD_IP_BLOCK_TYPE_GMC,
1153	.major = 6,
1154	.minor = 0,
1155	.rev = 0,
1156	.funcs = &gmc_v6_0_ip_funcs,
1157};
v5.14.15
   1/*
   2 * Copyright 2014 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23
  24#include <linux/firmware.h>
  25#include <linux/module.h>
  26#include <linux/pci.h>
  27
  28#include <drm/drm_cache.h>
  29#include "amdgpu.h"
  30#include "gmc_v6_0.h"
  31#include "amdgpu_ucode.h"
  32#include "amdgpu_gem.h"
  33
  34#include "bif/bif_3_0_d.h"
  35#include "bif/bif_3_0_sh_mask.h"
  36#include "oss/oss_1_0_d.h"
  37#include "oss/oss_1_0_sh_mask.h"
  38#include "gmc/gmc_6_0_d.h"
  39#include "gmc/gmc_6_0_sh_mask.h"
  40#include "dce/dce_6_0_d.h"
  41#include "dce/dce_6_0_sh_mask.h"
  42#include "si_enums.h"
  43
  44static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
  45static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
  46static int gmc_v6_0_wait_for_idle(void *handle);
  47
  48MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
  49MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
  50MODULE_FIRMWARE("amdgpu/verde_mc.bin");
  51MODULE_FIRMWARE("amdgpu/oland_mc.bin");
  52MODULE_FIRMWARE("amdgpu/hainan_mc.bin");
  53MODULE_FIRMWARE("amdgpu/si58_mc.bin");
  54
  55#define MC_SEQ_MISC0__MT__MASK   0xf0000000
  56#define MC_SEQ_MISC0__MT__GDDR1  0x10000000
  57#define MC_SEQ_MISC0__MT__DDR2   0x20000000
  58#define MC_SEQ_MISC0__MT__GDDR3  0x30000000
  59#define MC_SEQ_MISC0__MT__GDDR4  0x40000000
  60#define MC_SEQ_MISC0__MT__GDDR5  0x50000000
  61#define MC_SEQ_MISC0__MT__HBM    0x60000000
  62#define MC_SEQ_MISC0__MT__DDR3   0xB0000000
  63
  64static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)
  65{
  66	u32 blackout;
 
  67
  68	gmc_v6_0_wait_for_idle((void *)adev);
 
 
 
 
  69
  70	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
  71	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
  72		/* Block CPU access */
  73		WREG32(mmBIF_FB_EN, 0);
  74		/* blackout the MC */
  75		blackout = REG_SET_FIELD(blackout,
  76					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
  77		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
  78	}
  79	/* wait for the MC to settle */
  80	udelay(100);
  81
  82}
  83
  84static void gmc_v6_0_mc_resume(struct amdgpu_device *adev)
  85{
  86	u32 tmp;
  87
  88	/* unblackout the MC */
  89	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
  90	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
  91	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
  92	/* allow CPU access */
  93	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
  94	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
  95	WREG32(mmBIF_FB_EN, tmp);
  96}
  97
  98static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
  99{
 100	const char *chip_name;
 101	char fw_name[30];
 102	int err;
 103	bool is_58_fw = false;
 104
 105	DRM_DEBUG("\n");
 106
 107	switch (adev->asic_type) {
 108	case CHIP_TAHITI:
 109		chip_name = "tahiti";
 110		break;
 111	case CHIP_PITCAIRN:
 112		chip_name = "pitcairn";
 113		break;
 114	case CHIP_VERDE:
 115		chip_name = "verde";
 116		break;
 117	case CHIP_OLAND:
 118		chip_name = "oland";
 119		break;
 120	case CHIP_HAINAN:
 121		chip_name = "hainan";
 122		break;
 123	default: BUG();
 
 124	}
 125
 126	/* this memory configuration requires special firmware */
 127	if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
 128		is_58_fw = true;
 129
 130	if (is_58_fw)
 131		snprintf(fw_name, sizeof(fw_name), "amdgpu/si58_mc.bin");
 132	else
 133		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
 134	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
 135	if (err)
 136		goto out;
 137
 138	err = amdgpu_ucode_validate(adev->gmc.fw);
 139
 140out:
 141	if (err) {
 142		dev_err(adev->dev,
 143		       "si_mc: Failed to load firmware \"%s\"\n",
 144		       fw_name);
 145		release_firmware(adev->gmc.fw);
 146		adev->gmc.fw = NULL;
 147	}
 148	return err;
 149}
 150
 151static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
 152{
 153	const __le32 *new_fw_data = NULL;
 154	u32 running;
 155	const __le32 *new_io_mc_regs = NULL;
 156	int i, regs_size, ucode_size;
 157	const struct mc_firmware_header_v1_0 *hdr;
 158
 159	if (!adev->gmc.fw)
 160		return -EINVAL;
 161
 162	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
 163
 164	amdgpu_ucode_print_mc_hdr(&hdr->header);
 165
 166	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
 167	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
 168	new_io_mc_regs = (const __le32 *)
 169		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
 170	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 171	new_fw_data = (const __le32 *)
 172		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 173
 174	running = RREG32(mmMC_SEQ_SUP_CNTL) & MC_SEQ_SUP_CNTL__RUN_MASK;
 175
 176	if (running == 0) {
 177
 178		/* reset the engine and set to writable */
 179		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
 180		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
 181
 182		/* load mc io regs */
 183		for (i = 0; i < regs_size; i++) {
 184			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
 185			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
 186		}
 187		/* load the MC ucode */
 188		for (i = 0; i < ucode_size; i++) {
 189			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
 190		}
 191
 192		/* put the engine back into the active state */
 193		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
 194		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
 195		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
 196
 197		/* wait for training to complete */
 198		for (i = 0; i < adev->usec_timeout; i++) {
 199			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D0_MASK)
 200				break;
 201			udelay(1);
 202		}
 203		for (i = 0; i < adev->usec_timeout; i++) {
 204			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D1_MASK)
 205				break;
 206			udelay(1);
 207		}
 208
 209	}
 210
 211	return 0;
 212}
 213
 214static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
 215				       struct amdgpu_gmc *mc)
 216{
 217	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
 
 218	base <<= 24;
 219
 
 220	amdgpu_gmc_vram_location(adev, mc, base);
 221	amdgpu_gmc_gart_location(adev, mc);
 222}
 223
 224static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
 225{
 226	int i, j;
 
 
 227
 228	/* Initialize HDP */
 229	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
 230		WREG32((0xb05 + j), 0x00000000);
 231		WREG32((0xb06 + j), 0x00000000);
 232		WREG32((0xb07 + j), 0x00000000);
 233		WREG32((0xb08 + j), 0x00000000);
 234		WREG32((0xb09 + j), 0x00000000);
 235	}
 236	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
 237
 238	if (gmc_v6_0_wait_for_idle((void *)adev)) {
 
 
 
 
 239		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
 240	}
 241
 242	if (adev->mode_info.num_crtc) {
 243		u32 tmp;
 244
 245		/* Lockout access through VGA aperture*/
 246		tmp = RREG32(mmVGA_HDP_CONTROL);
 247		tmp |= VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK;
 248		WREG32(mmVGA_HDP_CONTROL, tmp);
 249
 250		/* disable VGA render */
 251		tmp = RREG32(mmVGA_RENDER_CONTROL);
 252		tmp &= ~VGA_VSTATUS_CNTL;
 253		WREG32(mmVGA_RENDER_CONTROL, tmp);
 254	}
 255	/* Update configuration */
 256	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
 257	       adev->gmc.vram_start >> 12);
 258	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
 259	       adev->gmc.vram_end >> 12);
 260	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
 261	       adev->vram_scratch.gpu_addr >> 12);
 262	WREG32(mmMC_VM_AGP_BASE, 0);
 263	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
 264	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
 265
 266	if (gmc_v6_0_wait_for_idle((void *)adev)) {
 267		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
 268	}
 269}
 270
 271static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
 272{
 273
 274	u32 tmp;
 275	int chansize, numchan;
 276	int r;
 277
 278	tmp = RREG32(mmMC_ARB_RAMCFG);
 279	if (tmp & (1 << 11)) {
 280		chansize = 16;
 281	} else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK) {
 282		chansize = 64;
 283	} else {
 284		chansize = 32;
 285	}
 286	tmp = RREG32(mmMC_SHARED_CHMAP);
 287	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
 288	case 0:
 289	default:
 290		numchan = 1;
 291		break;
 292	case 1:
 293		numchan = 2;
 294		break;
 295	case 2:
 296		numchan = 4;
 297		break;
 298	case 3:
 299		numchan = 8;
 300		break;
 301	case 4:
 302		numchan = 3;
 303		break;
 304	case 5:
 305		numchan = 6;
 306		break;
 307	case 6:
 308		numchan = 10;
 309		break;
 310	case 7:
 311		numchan = 12;
 312		break;
 313	case 8:
 314		numchan = 16;
 315		break;
 316	}
 317	adev->gmc.vram_width = numchan * chansize;
 318	/* size in MB on si */
 319	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 320	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 321
 322	if (!(adev->flags & AMD_IS_APU)) {
 323		r = amdgpu_device_resize_fb_bar(adev);
 324		if (r)
 325			return r;
 326	}
 327	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
 328	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 329	adev->gmc.visible_vram_size = adev->gmc.aper_size;
 330
 331	/* set the gart size */
 332	if (amdgpu_gart_size == -1) {
 333		switch (adev->asic_type) {
 334		case CHIP_HAINAN:    /* no MM engines */
 335		default:
 336			adev->gmc.gart_size = 256ULL << 20;
 337			break;
 338		case CHIP_VERDE:    /* UVD, VCE do not support GPUVM */
 339		case CHIP_TAHITI:   /* UVD, VCE do not support GPUVM */
 340		case CHIP_PITCAIRN: /* UVD, VCE do not support GPUVM */
 341		case CHIP_OLAND:    /* UVD, VCE do not support GPUVM */
 342			adev->gmc.gart_size = 1024ULL << 20;
 343			break;
 344		}
 345	} else {
 346		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
 347	}
 348
 349	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
 350	gmc_v6_0_vram_gtt_location(adev, &adev->gmc);
 351
 352	return 0;
 353}
 354
 355static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 356					uint32_t vmhub, uint32_t flush_type)
 357{
 358	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 359}
 360
 361static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 362					    unsigned vmid, uint64_t pd_addr)
 363{
 364	uint32_t reg;
 365
 366	/* write new base address */
 367	if (vmid < 8)
 368		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
 369	else
 370		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8);
 371	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
 372
 373	/* bits 0-15 are the VM contexts0-15 */
 374	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
 375
 376	return pd_addr;
 377}
 378
 379static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
 380				uint64_t *addr, uint64_t *flags)
 381{
 382	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
 383}
 384
 385static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev,
 386				struct amdgpu_bo_va_mapping *mapping,
 387				uint64_t *flags)
 388{
 389	*flags &= ~AMDGPU_PTE_EXECUTABLE;
 390	*flags &= ~AMDGPU_PTE_PRT;
 391}
 392
 393static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
 394					      bool value)
 395{
 396	u32 tmp;
 397
 398	tmp = RREG32(mmVM_CONTEXT1_CNTL);
 399	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 400			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 401	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 402			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 403	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 404			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 405	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 406			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 407	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 408			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 409	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 410			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 411	WREG32(mmVM_CONTEXT1_CNTL, tmp);
 412}
 413
 414 /**
 415   + * gmc_v8_0_set_prt - set PRT VM fault
 416   + *
 417   + * @adev: amdgpu_device pointer
 418   + * @enable: enable/disable VM fault handling for PRT
 419   +*/
 420static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
 421{
 422	u32 tmp;
 423
 424	if (enable && !adev->gmc.prt_warning) {
 425		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
 426		adev->gmc.prt_warning = true;
 427	}
 428
 429	tmp = RREG32(mmVM_PRT_CNTL);
 430	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 431			    CB_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
 432			    enable);
 433	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 434			    TC_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
 435			    enable);
 436	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 437			    L2_CACHE_STORE_INVALID_ENTRIES,
 438			    enable);
 439	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 440			    L1_TLB_STORE_INVALID_ENTRIES,
 441			    enable);
 442	WREG32(mmVM_PRT_CNTL, tmp);
 443
 444	if (enable) {
 445		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
 
 446		uint32_t high = adev->vm_manager.max_pfn -
 447			(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
 448
 449		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
 450		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
 451		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
 452		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
 453		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
 454		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
 455		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
 456		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
 457	} else {
 458		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
 459		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
 460		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
 461		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
 462		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
 463		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
 464		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
 465		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
 466	}
 467}
 468
 469static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
 470{
 471	uint64_t table_addr;
 472	int r, i;
 473	u32 field;
 
 474
 475	if (adev->gart.bo == NULL) {
 476		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
 477		return -EINVAL;
 478	}
 479	r = amdgpu_gart_table_vram_pin(adev);
 480	if (r)
 481		return r;
 482
 483	table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
 484
 485	/* Setup TLB control */
 486	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
 487	       (0xA << 7) |
 488	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK |
 489	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_FRAGMENT_PROCESSING_MASK |
 490	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
 491	       MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK |
 492	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
 493	/* Setup L2 cache */
 494	WREG32(mmVM_L2_CNTL,
 495	       VM_L2_CNTL__ENABLE_L2_CACHE_MASK |
 496	       VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK |
 497	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
 498	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
 499	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
 500	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
 501	WREG32(mmVM_L2_CNTL2,
 502	       VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK |
 503	       VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK);
 504
 505	field = adev->vm_manager.fragment_size;
 506	WREG32(mmVM_L2_CNTL3,
 507	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
 508	       (field << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
 509	       (field << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
 510	/* setup context0 */
 511	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
 512	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
 513	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
 514	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 515			(u32)(adev->dummy_page_addr >> 12));
 516	WREG32(mmVM_CONTEXT0_CNTL2, 0);
 517	WREG32(mmVM_CONTEXT0_CNTL,
 518	       VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK |
 519	       (0UL << VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
 520	       VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
 521
 522	WREG32(0x575, 0);
 523	WREG32(0x576, 0);
 524	WREG32(0x577, 0);
 525
 526	/* empty context1-15 */
 527	/* set vm size, must be a multiple of 4 */
 528	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
 529	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
 530	/* Assign the pt base to something valid for now; the pts used for
 531	 * the VMs are determined by the application and setup and assigned
 532	 * on the fly in the vm part of radeon_gart.c
 533	 */
 534	for (i = 1; i < AMDGPU_NUM_VMID; i++) {
 535		if (i < 8)
 536			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
 537			       table_addr >> 12);
 538		else
 539			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
 540			       table_addr >> 12);
 541	}
 542
 543	/* enable context1-15 */
 544	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
 545	       (u32)(adev->dummy_page_addr >> 12));
 546	WREG32(mmVM_CONTEXT1_CNTL2, 4);
 547	WREG32(mmVM_CONTEXT1_CNTL,
 548	       VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
 549	       (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
 550	       ((adev->vm_manager.block_size - 9)
 551	       << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
 552	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
 553		gmc_v6_0_set_fault_enable_default(adev, false);
 554	else
 555		gmc_v6_0_set_fault_enable_default(adev, true);
 556
 557	gmc_v6_0_flush_gpu_tlb(adev, 0, 0, 0);
 558	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
 559		 (unsigned)(adev->gmc.gart_size >> 20),
 560		 (unsigned long long)table_addr);
 561	adev->gart.ready = true;
 562	return 0;
 563}
 564
 565static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
 566{
 567	int r;
 568
 569	if (adev->gart.bo) {
 570		dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
 571		return 0;
 572	}
 573	r = amdgpu_gart_init(adev);
 574	if (r)
 575		return r;
 576	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
 577	adev->gart.gart_pte_flags = 0;
 578	return amdgpu_gart_table_vram_alloc(adev);
 579}
 580
 581static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
 582{
 583	/*unsigned i;
 584
 585	for (i = 1; i < 16; ++i) {
 586		uint32_t reg;
 587		if (i < 8)
 588			reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i ;
 589		else
 590			reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (i - 8);
 591		adev->vm_manager.saved_table_addr[i] = RREG32(reg);
 592	}*/
 593
 594	/* Disable all tables */
 595	WREG32(mmVM_CONTEXT0_CNTL, 0);
 596	WREG32(mmVM_CONTEXT1_CNTL, 0);
 597	/* Setup TLB control */
 598	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
 599	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
 600	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
 601	/* Setup L2 cache */
 602	WREG32(mmVM_L2_CNTL,
 603	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
 604	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
 605	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
 606	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
 607	WREG32(mmVM_L2_CNTL2, 0);
 608	WREG32(mmVM_L2_CNTL3,
 609	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
 610	       (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
 611	amdgpu_gart_table_vram_unpin(adev);
 612}
 613
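    /* Decode VM_CONTEXT1_PROTECTION_FAULT_STATUS and the client id into a
     * human readable fault message.
     */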
 614static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
 615				     u32 status, u32 addr, u32 mc_client)
 616{
 617	u32 mc_id;
 618	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
 619	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 620					PROTECTIONS);
 621	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
 622		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
 623
 624	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 625			      MEMORY_CLIENT_ID);
 626
 627	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
 628	       protections, vmid, addr,
 629	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 630			     MEMORY_CLIENT_RW) ?
 631	       "write" : "read", block, mc_client, mc_id);
 632}
 633
 634/*
 635static const u32 mc_cg_registers[] = {
 636	MC_HUB_MISC_HUB_CG,
 637	MC_HUB_MISC_SIP_CG,
 638	MC_HUB_MISC_VM_CG,
 639	MC_XPB_CLK_GAT,
 640	ATC_MISC_CG,
 641	MC_CITF_MISC_WR_CG,
 642	MC_CITF_MISC_RD_CG,
 643	MC_CITF_MISC_VM_CG,
 644	VM_L2_CG,
 645};
 646
 647static const u32 mc_cg_ls_en[] = {
 648	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
 649	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
 650	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
 651	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
 652	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
 653	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
 654	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
 655	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
 656	VM_L2_CG__MEM_LS_ENABLE_MASK,
 657};
 658
 659static const u32 mc_cg_en[] = {
 660	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
 661	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
 662	MC_HUB_MISC_VM_CG__ENABLE_MASK,
 663	MC_XPB_CLK_GAT__ENABLE_MASK,
 664	ATC_MISC_CG__ENABLE_MASK,
 665	MC_CITF_MISC_WR_CG__ENABLE_MASK,
 666	MC_CITF_MISC_RD_CG__ENABLE_MASK,
 667	MC_CITF_MISC_VM_CG__ENABLE_MASK,
 668	VM_L2_CG__ENABLE_MASK,
 669};
 670
 671static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev,
 672				  bool enable)
 673{
 674	int i;
 675	u32 orig, data;
 676
 677	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
 678		orig = data = RREG32(mc_cg_registers[i]);
 679		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
 680			data |= mc_cg_ls_en[i];
 681		else
 682			data &= ~mc_cg_ls_en[i];
 683		if (data != orig)
 684			WREG32(mc_cg_registers[i], data);
 685	}
 686}
 687
 688static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
 689				    bool enable)
 690{
 691	int i;
 692	u32 orig, data;
 693
 694	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
 695		orig = data = RREG32(mc_cg_registers[i]);
 696		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
 697			data |= mc_cg_en[i];
 698		else
 699			data &= ~mc_cg_en[i];
 700		if (data != orig)
 701			WREG32(mc_cg_registers[i], data);
 702	}
 703}
 704
 705static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
 706				     bool enable)
 707{
 708	u32 orig, data;
 709
 710	orig = data = RREG32_PCIE(ixPCIE_CNTL2);
 711
 712	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
 713		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
 714		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
 715		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
 716		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
 717	} else {
 718		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
 719		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
 720		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
 721		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
 722	}
 723
 724	if (orig != data)
 725		WREG32_PCIE(ixPCIE_CNTL2, data);
 726}
 727
 728static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
 729				     bool enable)
 730{
 731	u32 orig, data;
 732
 733	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
 734
 735	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
 736		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
 737	else
 738		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
 739
 740	if (orig != data)
 741		WREG32(mmHDP_HOST_PATH_CNTL, data);
 742}
 743
 744static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
 745				   bool enable)
 746{
 747	u32 orig, data;
 748
 749	orig = data = RREG32(mmHDP_MEM_POWER_LS);
 750
 751	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
 752		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
 753	else
 754		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
 755
 756	if (orig != data)
 757		WREG32(mmHDP_MEM_POWER_LS, data);
 758}
 759*/
 760
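    /* Map the MC_SEQ_MISC0 memory type field to an AMDGPU_VRAM_TYPE_* value. */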
 761static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
 762{
 763	switch (mc_seq_vram_type) {
 764	case MC_SEQ_MISC0__MT__GDDR1:
 765		return AMDGPU_VRAM_TYPE_GDDR1;
 766	case MC_SEQ_MISC0__MT__DDR2:
 767		return AMDGPU_VRAM_TYPE_DDR2;
 768	case MC_SEQ_MISC0__MT__GDDR3:
 769		return AMDGPU_VRAM_TYPE_GDDR3;
 770	case MC_SEQ_MISC0__MT__GDDR4:
 771		return AMDGPU_VRAM_TYPE_GDDR4;
 772	case MC_SEQ_MISC0__MT__GDDR5:
 773		return AMDGPU_VRAM_TYPE_GDDR5;
 774	case MC_SEQ_MISC0__MT__DDR3:
 775		return AMDGPU_VRAM_TYPE_DDR3;
 776	default:
 777		return AMDGPU_VRAM_TYPE_UNKNOWN;
 778	}
 779}
 780
 781static int gmc_v6_0_early_init(void *handle)
 782{
 783	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 784
 785	gmc_v6_0_set_gmc_funcs(adev);
 786	gmc_v6_0_set_irq_funcs(adev);
 787
 788	return 0;
 789}
 790
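    /* Request the VM fault interrupt unless faults are configured to always
     * halt (AMDGPU_VM_FAULT_STOP_ALWAYS).
     */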
 791static int gmc_v6_0_late_init(void *handle)
 792{
 793	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 794
 795	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
 796		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
 797	else
 798		return 0;
 799}
 800
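    /* Size of the framebuffer reserved by the VBIOS: either the fixed VGA
     * allocation or the active viewport at 4 bytes per pixel.
     */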
 801static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
 802{
 803	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
 804	unsigned size;
 805
 806	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
 807		size = AMDGPU_VBIOS_VGA_ALLOCATION;
 808	} else {
 809		u32 viewport = RREG32(mmVIEWPORT_SIZE);
 
 810		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
 811			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
 812			4);
 813	}
 814	return size;
 815}
 816
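    /* Software init: detect the VRAM type, register the VM fault interrupt
     * sources, size the address space and DMA mask, load the MC firmware and
     * set up the GART table and VM manager.
     */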
 817static int gmc_v6_0_sw_init(void *handle)
 818{
 819	int r;
 820	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 821
 822	adev->num_vmhubs = 1;
 823
 824	if (adev->flags & AMD_IS_APU) {
 825		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
 826	} else {
 827		u32 tmp = RREG32(mmMC_SEQ_MISC0);
 
 828		tmp &= MC_SEQ_MISC0__MT__MASK;
 829		adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp);
 830	}
 831
 832	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
 833	if (r)
 834		return r;
 835
 836	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
 837	if (r)
 838		return r;
 839
 840	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);
 841
 842	adev->gmc.mc_mask = 0xffffffffffULL;
 843
 844	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
 845	if (r) {
 846		dev_warn(adev->dev, "No suitable DMA available.\n");
 847		return r;
 848	}
 849	adev->need_swiotlb = drm_need_swiotlb(44);
 850
 851	r = gmc_v6_0_init_microcode(adev);
 852	if (r) {
 853		dev_err(adev->dev, "Failed to load mc firmware!\n");
 854		return r;
 855	}
 856
 857	r = gmc_v6_0_mc_init(adev);
 858	if (r)
 859		return r;
 860
 861	amdgpu_gmc_get_vbios_allocations(adev);
 862
 863	r = amdgpu_bo_init(adev);
 864	if (r)
 865		return r;
 866
 867	r = gmc_v6_0_gart_init(adev);
 868	if (r)
 869		return r;
 870
 871	/*
 872	 * number of VMs
 873	 * VMID 0 is reserved for System
 874	 * amdgpu graphics/compute will use VMIDs 1-7
 875	 * amdkfd will use VMIDs 8-15
 876	 */
 877	adev->vm_manager.first_kfd_vmid = 8;
 878	amdgpu_vm_manager_init(adev);
 879
 880	/* base offset of vram pages */
 881	if (adev->flags & AMD_IS_APU) {
 882		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
 883
 884		tmp <<= 22;
 885		adev->vm_manager.vram_base_offset = tmp;
 886	} else {
 887		adev->vm_manager.vram_base_offset = 0;
 888	}
 889
 890	return 0;
 891}
 892
 893static int gmc_v6_0_sw_fini(void *handle)
 894{
 895	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 896
 897	amdgpu_gem_force_release(adev);
 898	amdgpu_vm_manager_fini(adev);
 899	amdgpu_gart_table_vram_free(adev);
 900	amdgpu_bo_fini(adev);
 901	release_firmware(adev->gmc.fw);
 902	adev->gmc.fw = NULL;
 903
 904	return 0;
 905}
 906
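    /* Hardware init: program the MC, load the MC firmware on dGPUs and
     * enable the GART.
     */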
 907static int gmc_v6_0_hw_init(void *handle)
 908{
 909	int r;
 910	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 911
 912	gmc_v6_0_mc_program(adev);
 913
 914	if (!(adev->flags & AMD_IS_APU)) {
 915		r = gmc_v6_0_mc_load_microcode(adev);
 916		if (r) {
 917			dev_err(adev->dev, "Failed to load MC firmware!\n");
 918			return r;
 919		}
 920	}
 921
 922	r = gmc_v6_0_gart_enable(adev);
 923	if (r)
 924		return r;
 925
 926	return r;
 927}
 928
 929static int gmc_v6_0_hw_fini(void *handle)
 930{
 931	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 932
 933	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
 934	gmc_v6_0_gart_disable(adev);
 935
 936	return 0;
 937}
 938
 939static int gmc_v6_0_suspend(void *handle)
 940{
 941	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 942
 943	gmc_v6_0_hw_fini(adev);
 944
 945	return 0;
 946}
 947
 948static int gmc_v6_0_resume(void *handle)
 949{
 950	int r;
 951	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 952
 953	r = gmc_v6_0_hw_init(adev);
 954	if (r)
 955		return r;
 956
 957	amdgpu_vmid_reset_all(adev);
 958
 959	return 0;
 960}
 961
 962static bool gmc_v6_0_is_idle(void *handle)
 963{
 964	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 965	u32 tmp = RREG32(mmSRBM_STATUS);
 966
 967	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
 968		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
 969		return false;
 970
 971	return true;
 972}
 973
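    /* Poll until gmc_v6_0_is_idle() reports idle or adev->usec_timeout
     * microseconds have elapsed.
     */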
 974static int gmc_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 975{
 976	unsigned i;
 977	struct amdgpu_device *adev = ip_block->adev;
 978
 979	for (i = 0; i < adev->usec_timeout; i++) {
 980		if (gmc_v6_0_is_idle(adev))
 981			return 0;
 982		udelay(1);
 983	}
 984	return -ETIMEDOUT;
 985
 986}
 987
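    /* If SRBM_STATUS reports the VMC or MC busy, stop the MC, pulse the
     * corresponding SRBM soft reset bits and resume the MC.
     */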
 988static int gmc_v6_0_soft_reset(struct amdgpu_ip_block *ip_block)
 989{
 990	struct amdgpu_device *adev = ip_block->adev;
 991	u32 srbm_soft_reset = 0;
 992	u32 tmp = RREG32(mmSRBM_STATUS);
 993
 994	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
 995		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
 996						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
 997
 998	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
 999		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
1000		if (!(adev->flags & AMD_IS_APU))
1001			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1002							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
1003	}
1004
1005	if (srbm_soft_reset) {
1006		gmc_v6_0_mc_stop(adev);
1007		if (gmc_v6_0_wait_for_idle(ip_block)) {
1008			dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
1009		}
1010
1011
1012		tmp = RREG32(mmSRBM_SOFT_RESET);
1013		tmp |= srbm_soft_reset;
1014		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1015		WREG32(mmSRBM_SOFT_RESET, tmp);
1016		tmp = RREG32(mmSRBM_SOFT_RESET);
1017
1018		udelay(50);
1019
1020		tmp &= ~srbm_soft_reset;
1021		WREG32(mmSRBM_SOFT_RESET, tmp);
1022		tmp = RREG32(mmSRBM_SOFT_RESET);
1023
1024		udelay(50);
1025
1026		gmc_v6_0_mc_resume(adev);
1027		udelay(50);
1028	}
1029
1030	return 0;
1031}
1032
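    /* Enable or disable the protection fault interrupt sources in
     * VM_CONTEXT0_CNTL and VM_CONTEXT1_CNTL.
     */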
1033static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
1034					     struct amdgpu_irq_src *src,
1035					     unsigned type,
1036					     enum amdgpu_interrupt_state state)
1037{
1038	u32 tmp;
1039	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1040		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1041		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1042		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1043		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1044		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
1045
1046	switch (state) {
1047	case AMDGPU_IRQ_STATE_DISABLE:
1048		tmp = RREG32(mmVM_CONTEXT0_CNTL);
1049		tmp &= ~bits;
1050		WREG32(mmVM_CONTEXT0_CNTL, tmp);
1051		tmp = RREG32(mmVM_CONTEXT1_CNTL);
1052		tmp &= ~bits;
1053		WREG32(mmVM_CONTEXT1_CNTL, tmp);
1054		break;
1055	case AMDGPU_IRQ_STATE_ENABLE:
1056		tmp = RREG32(mmVM_CONTEXT0_CNTL);
1057		tmp |= bits;
1058		WREG32(mmVM_CONTEXT0_CNTL, tmp);
1059		tmp = RREG32(mmVM_CONTEXT1_CNTL);
1060		tmp |= bits;
1061		WREG32(mmVM_CONTEXT1_CNTL, tmp);
1062		break;
1063	default:
1064		break;
1065	}
1066
1067	return 0;
1068}
1069
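    /* Handle a VM protection fault: read and acknowledge the fault address
     * and status registers, optionally stop further faults, and log a
     * rate-limited, decoded fault message.
     */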
1070static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
1071				      struct amdgpu_irq_src *source,
1072				      struct amdgpu_iv_entry *entry)
1073{
1074	u32 addr, status;
1075
1076	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
1077	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
1078	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
1079
1080	if (!addr && !status)
1081		return 0;
1082
1083	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
1084		gmc_v6_0_set_fault_enable_default(adev, false);
1085
1086	if (printk_ratelimit()) {
1087		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
1088			entry->src_id, entry->src_data[0]);
1089		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
1090			addr);
1091		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1092			status);
1093		gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
1094	}
1095
1096	return 0;
1097}
1098
1099static int gmc_v6_0_set_clockgating_state(void *handle,
1100					  enum amd_clockgating_state state)
1101{
1102	return 0;
1103}
1104
1105static int gmc_v6_0_set_powergating_state(void *handle,
1106					  enum amd_powergating_state state)
1107{
1108	return 0;
1109}
1110
1111static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
1112	.name = "gmc_v6_0",
1113	.early_init = gmc_v6_0_early_init,
1114	.late_init = gmc_v6_0_late_init,
1115	.sw_init = gmc_v6_0_sw_init,
1116	.sw_fini = gmc_v6_0_sw_fini,
1117	.hw_init = gmc_v6_0_hw_init,
1118	.hw_fini = gmc_v6_0_hw_fini,
1119	.suspend = gmc_v6_0_suspend,
1120	.resume = gmc_v6_0_resume,
1121	.is_idle = gmc_v6_0_is_idle,
1122	.wait_for_idle = gmc_v6_0_wait_for_idle,
1123	.soft_reset = gmc_v6_0_soft_reset,
1124	.set_clockgating_state = gmc_v6_0_set_clockgating_state,
1125	.set_powergating_state = gmc_v6_0_set_powergating_state,
1126};
1127
1128static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
1129	.flush_gpu_tlb = gmc_v6_0_flush_gpu_tlb,
1130	.emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
1131	.set_prt = gmc_v6_0_set_prt,
1132	.get_vm_pde = gmc_v6_0_get_vm_pde,
1133	.get_vm_pte = gmc_v6_0_get_vm_pte,
1134	.get_vbios_fb_size = gmc_v6_0_get_vbios_fb_size,
1135};
1136
1137static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
1138	.set = gmc_v6_0_vm_fault_interrupt_state,
1139	.process = gmc_v6_0_process_interrupt,
1140};
1141
1142static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev)
1143{
1144	adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
1145}
1146
1147static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
1148{
1149	adev->gmc.vm_fault.num_types = 1;
1150	adev->gmc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
1151}
1152
1153const struct amdgpu_ip_block_version gmc_v6_0_ip_block =
1154{
1155	.type = AMD_IP_BLOCK_TYPE_GMC,
1156	.major = 6,
1157	.minor = 0,
1158	.rev = 0,
1159	.funcs = &gmc_v6_0_ip_funcs,
1160};