/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v6_0.h"
#include "amdgpu_ucode.h"
#include "amdgpu_gem.h"

#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#include "si_enums.h"

static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v6_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
MODULE_FIRMWARE("amdgpu/verde_mc.bin");
MODULE_FIRMWARE("amdgpu/oland_mc.bin");
MODULE_FIRMWARE("amdgpu/hainan_mc.bin");
MODULE_FIRMWARE("amdgpu/si58_mc.bin");

#define MC_SEQ_MISC0__MT__MASK   0xf0000000
#define MC_SEQ_MISC0__MT__GDDR1  0x10000000
#define MC_SEQ_MISC0__MT__DDR2   0x20000000
#define MC_SEQ_MISC0__MT__GDDR3  0x30000000
#define MC_SEQ_MISC0__MT__GDDR4  0x40000000
#define MC_SEQ_MISC0__MT__GDDR5  0x50000000
#define MC_SEQ_MISC0__MT__HBM    0x60000000
#define MC_SEQ_MISC0__MT__DDR3   0xB0000000

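/**
 * gmc_v6_0_mc_stop - stop the memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Waits for the MC to go idle, then blacks out the MC and blocks CPU
 * access to the framebuffer so the MC can be reprogrammed (used by the
 * soft reset path below).
 */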
static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)
{
	u32 blackout;

	gmc_v6_0_wait_for_idle((void *)adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
	}
	/* wait for the MC to settle */
	udelay(100);
}

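/**
 * gmc_v6_0_mc_resume - undo the effects of gmc_v6_0_mc_stop()
 *
 * @adev: amdgpu_device pointer
 *
 * Takes the MC out of blackout mode and re-enables CPU read/write
 * access to the framebuffer through the BIF.
 */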
static void gmc_v6_0_mc_resume(struct amdgpu_device *adev)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);
}

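/**
 * gmc_v6_0_init_microcode - fetch the MC ucode from the filesystem
 *
 * @adev: amdgpu_device pointer
 *
 * Selects the firmware image matching the ASIC (or the special si58
 * image for boards with that memory configuration) and requests it
 * through the firmware loader.  Returns 0 on success, negative error
 * code on failure.
 */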
static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	bool is_58_fw = false;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TAHITI:
		chip_name = "tahiti";
		break;
	case CHIP_PITCAIRN:
		chip_name = "pitcairn";
		break;
	case CHIP_VERDE:
		chip_name = "verde";
		break;
	case CHIP_OLAND:
		chip_name = "oland";
		break;
	case CHIP_HAINAN:
		chip_name = "hainan";
		break;
	default:
		BUG();
	}

	/* this memory configuration requires special firmware */
	if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
		is_58_fw = true;

	if (is_58_fw)
		snprintf(fw_name, sizeof(fw_name), "amdgpu/si58_mc.bin");
	else
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = amdgpu_ucode_request(adev, &adev->gmc.fw, fw_name);
	if (err) {
		dev_err(adev->dev,
		       "si_mc: Failed to load firmware \"%s\"\n",
		       fw_name);
		amdgpu_ucode_release(&adev->gmc.fw);
	}
	return err;
}

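/**
 * gmc_v6_0_mc_load_microcode - program the MC ucode into the hardware
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the firmware header, writes the IO debug register pairs and
 * the ucode words into the MC sequencer, restarts the sequencer, and
 * waits for both training-done flags (D0 and D1).  Does nothing if the
 * sequencer is already running.  Returns 0 on success, -EINVAL if no
 * firmware was loaded.
 */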
static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const __le32 *new_fw_data = NULL;
	u32 running;
	const __le32 *new_io_mc_regs = NULL;
	int i, regs_size, ucode_size;
	const struct mc_firmware_header_v1_0 *hdr;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;

	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	new_io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	new_fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = RREG32(mmMC_SEQ_SUP_CNTL) & MC_SEQ_SUP_CNTL__RUN_MASK;

	if (running == 0) {

		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D0_MASK)
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D1_MASK)
				break;
			udelay(1);
		}

	}

	return 0;
}

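/**
 * gmc_v6_0_vram_gtt_location - place VRAM and GART in the GPU address space
 *
 * @adev: amdgpu_device pointer
 * @mc: amdgpu_gmc structure holding the MC layout
 *
 * Reads the framebuffer base programmed by the VBIOS from
 * MC_VM_FB_LOCATION (bits 15:0, shifted up by 24, i.e. in 16MB units)
 * and lets the common GMC helpers position VRAM, the default AGP
 * aperture, and the GART around it.
 */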
static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;

	base <<= 24;

	amdgpu_gmc_set_agp_default(adev, mc);
	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
}

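/**
 * gmc_v6_0_mc_program - program the MC aperture registers
 *
 * @adev: amdgpu_device pointer
 *
 * Clears the HDP address remap registers, locks out the VGA aperture
 * and VGA rendering, then programs the system aperture, default page
 * address, and AGP base/top to match the layout chosen in
 * gmc_v6_0_vram_gtt_location().
 */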
static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
{
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (gmc_v6_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	if (adev->mode_info.num_crtc) {
		u32 tmp;

		/* Lockout access through VGA aperture */
		tmp = RREG32(mmVGA_HDP_CONTROL);
		tmp |= VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK;
		WREG32(mmVGA_HDP_CONTROL, tmp);

		/* disable VGA render */
		tmp = RREG32(mmVGA_RENDER_CONTROL);
		tmp &= ~VGA_VSTATUS_CNTL;
		WREG32(mmVGA_RENDER_CONTROL, tmp);
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->gmc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->gmc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->mem_scratch.gpu_addr >> 12);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22);
	WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22);

	if (gmc_v6_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
}

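/**
 * gmc_v6_0_mc_init - collect VRAM and GART sizing information
 *
 * @adev: amdgpu_device pointer
 *
 * Derives the VRAM bus width from the channel size and channel count,
 * reads the VRAM size (in MB) from CONFIG_MEMSIZE, resizes the
 * framebuffer BAR on dGPUs, and picks the GART size: the module
 * parameter override if set, otherwise 256MB or 1GB depending on
 * whether the ASIC has MM engines that cannot use GPUVM.
 */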
static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;
	int r;

	tmp = RREG32(mmMC_ARB_RAMCFG);
	if (tmp & (1 << 11))
		chansize = 16;
	else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK)
		chansize = 64;
	else
		chansize = 32;

	tmp = RREG32(mmMC_SHARED_CHMAP);
	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
	adev->gmc.vram_width = numchan * chansize;
	/* size in MB on si */
	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
	adev->gmc.visible_vram_size = adev->gmc.aper_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_HAINAN:    /* no MM engines */
		default:
			adev->gmc.gart_size = 256ULL << 20;
			break;
		case CHIP_VERDE:    /* UVD, VCE do not support GPUVM */
		case CHIP_TAHITI:   /* UVD, VCE do not support GPUVM */
		case CHIP_PITCAIRN: /* UVD, VCE do not support GPUVM */
		case CHIP_OLAND:    /* UVD, VCE do not support GPUVM */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
	gmc_v6_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

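/**
 * gmc_v6_0_flush_gpu_tlb - flush the VM TLB for one VMID
 *
 * @adev: amdgpu_device pointer
 * @vmid: VM instance to flush
 * @vmhub: unused on SI, there is a single hub
 * @flush_type: unused on SI
 *
 * Writing bit @vmid of VM_INVALIDATE_REQUEST invalidates the TLB
 * entries for that VM context.
 */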
static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
					uint32_t vmhub, uint32_t flush_type)
{
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

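/**
 * gmc_v6_0_emit_flush_gpu_tlb - emit a TLB flush on a ring
 *
 * @ring: ring to emit the register writes on
 * @vmid: VM instance to flush
 * @pd_addr: new page directory base address
 *
 * Updates the per-context page table base register for @vmid and then
 * requests a TLB invalidate, from the command stream rather than via
 * MMIO.
 */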
static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	uint32_t reg;

	/* write new base address */
	if (vmid < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8);
	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);

	/* bits 0-15 are the VM contexts0-15 */
	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);

	return pd_addr;
}

static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}

static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags &= ~AMDGPU_PTE_PRT;
}

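/**
 * gmc_v6_0_set_fault_enable_default - toggle VM fault reporting
 *
 * @adev: amdgpu_device pointer
 * @value: true to raise faults on protection violations, false to
 *	suppress them
 *
 * Sets or clears the "fault enable default" bit for every fault type
 * in VM_CONTEXT1_CNTL.
 */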
static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v6_0_set_prt() - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	u32 tmp;

	if (enable && !adev->gmc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->gmc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
			    enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
			    enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES,
			    enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES,
			    enable);
	WREG32(mmVM_PRT_CNTL, tmp);

	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_BOTTOM >>
			AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn -
			(AMDGPU_VA_RESERVED_TOP >> AMDGPU_GPU_PAGE_SHIFT);

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}

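/**
 * gmc_v6_0_gart_enable - set up and enable the GART
 *
 * @adev: amdgpu_device pointer
 *
 * Programs the L1 TLB and L2 cache control registers, points VM
 * context 0 at the GART table for system pages, gives contexts 1-15 a
 * valid page table base for now, enables fault reporting according to
 * amdgpu_vm_fault_stop, and flushes the TLB.  Returns 0 on success,
 * -EINVAL if the GART table was never allocated.
 */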
static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
{
	uint64_t table_addr;
	u32 field;
	int i;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);

	table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);

	/* Setup TLB control */
	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK |
	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_FRAGMENT_PROCESSING_MASK |
	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
	       MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK |
	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
	/* Setup L2 cache */
	WREG32(mmVM_L2_CNTL,
	       VM_L2_CNTL__ENABLE_L2_CACHE_MASK |
	       VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK |
	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
	WREG32(mmVM_L2_CNTL2,
	       VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK |
	       VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK);

	field = adev->vm_manager.fragment_size;
	WREG32(mmVM_L2_CNTL3,
	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
	       (field << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
	       (field << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	WREG32(mmVM_CONTEXT0_CNTL,
	       VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK |
	       (0UL << VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
	       VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);

	WREG32(0x575, 0);
	WREG32(0x576, 0);
	WREG32(0x577, 0);

	/* empty context1-15 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	/* Assign the pt base to something valid for now; the pts used for
	 * the VMs are determined by the application and setup and assigned
	 * on the fly in the vm part of radeon_gart.c
	 */
	for (i = 1; i < AMDGPU_NUM_VMID; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	WREG32(mmVM_CONTEXT1_CNTL,
	       VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
	       (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
	       ((adev->vm_manager.block_size - 9)
	       << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v6_0_set_fault_enable_default(adev, false);
	else
		gmc_v6_0_set_fault_enable_default(adev, true);

	gmc_v6_0_flush_gpu_tlb(adev, 0, 0, 0);
	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned int)(adev->gmc.gart_size >> 20),
		 (unsigned long long)table_addr);
	return 0;
}

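/**
 * gmc_v6_0_gart_init - allocate the GART table
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes the common GART structures and allocates the page table
 * in VRAM, 8 bytes (one 64-bit entry) per GPU page.
 */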
static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
		return 0;
	}
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = 0;
	return amdgpu_gart_table_vram_alloc(adev);
}

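/**
 * gmc_v6_0_gart_disable - shut the GART down
 *
 * @adev: amdgpu_device pointer
 *
 * Disables all VM contexts and puts the TLB and L2 cache controls back
 * into their pass-through state.
 */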
static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
{
	/*unsigned i;

	for (i = 1; i < 16; ++i) {
		uint32_t reg;
		if (i < 8)
			reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i ;
		else
			reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (i - 8);
		adev->vm_manager.saved_table_addr[i] = RREG32(reg);
	}*/

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
	/* Setup L2 cache */
	WREG32(mmVM_L2_CNTL,
	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
	WREG32(mmVM_L2_CNTL2, 0);
	WREG32(mmVM_L2_CNTL3,
	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
	       (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
}

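/**
 * gmc_v6_0_vm_decode_fault - log a decoded VM protection fault
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS value
 * @addr: faulting page address
 * @mc_client: four-character memory client tag from the fault
 *
 * Extracts the VMID, protection bits, memory client ID, and read/write
 * direction from the fault status and prints them.
 */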
static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
	       protections, vmid, addr,
	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     MEMORY_CLIENT_RW) ?
	       "write" : "read", block, mc_client, mc_id);
}

/*
static const u32 mc_cg_registers[] = {
	MC_HUB_MISC_HUB_CG,
	MC_HUB_MISC_SIP_CG,
	MC_HUB_MISC_VM_CG,
	MC_XPB_CLK_GAT,
	ATC_MISC_CG,
	MC_CITF_MISC_WR_CG,
	MC_CITF_MISC_RD_CG,
	MC_CITF_MISC_VM_CG,
	VM_L2_CG,
};

static const u32 mc_cg_ls_en[] = {
	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	VM_L2_CG__MEM_LS_ENABLE_MASK,
};

static const u32 mc_cg_en[] = {
	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
	MC_HUB_MISC_VM_CG__ENABLE_MASK,
	MC_XPB_CLK_GAT__ENABLE_MASK,
	ATC_MISC_CG__ENABLE_MASK,
	MC_CITF_MISC_WR_CG__ENABLE_MASK,
	MC_CITF_MISC_RD_CG__ENABLE_MASK,
	MC_CITF_MISC_VM_CG__ENABLE_MASK,
	VM_L2_CG__ENABLE_MASK,
};

static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev,
				  bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
			data |= mc_cg_ls_en[i];
		else
			data &= ~mc_cg_ls_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
				    bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
			data |= mc_cg_en[i];
		else
			data &= ~mc_cg_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
	} else {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
	}

	if (orig != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
	else
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);

	if (orig != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
				   bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
	else
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);

	if (orig != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}
*/

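/**
 * gmc_v6_0_convert_vram_type - map an MC_SEQ_MISC0 memory type to an
 * AMDGPU_VRAM_TYPE_* constant
 *
 * @mc_seq_vram_type: MC_SEQ_MISC0 value masked with
 *	MC_SEQ_MISC0__MT__MASK; e.g. 0x50000000 maps to
 *	AMDGPU_VRAM_TYPE_GDDR5
 */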
static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v6_0_set_gmc_funcs(adev);
	gmc_v6_0_set_irq_funcs(adev);

	return 0;
}

static int gmc_v6_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
	else
		return 0;
}

static unsigned int gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
	unsigned int size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport = RREG32(mmVIEWPORT_SIZE);

		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
			4);
	}
	return size;
}

static int gmc_v6_0_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);

	if (adev->flags & AMD_IS_APU) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp = RREG32(mmMC_SEQ_MISC0);

		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
	if (r)
		return r;

	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);

	adev->gmc.mc_mask = 0xffffffffffULL;

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
	if (r) {
		dev_warn(adev->dev, "No suitable DMA available.\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(40);

	r = gmc_v6_0_init_microcode(adev);
	if (r) {
		dev_err(adev->dev, "Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v6_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v6_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	return 0;
}

static int gmc_v6_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_ucode_release(&adev->gmc.fw);

	return 0;
}

static int gmc_v6_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v6_0_mc_program(adev);

	if (!(adev->flags & AMD_IS_APU)) {
		r = gmc_v6_0_mc_load_microcode(adev);
		if (r) {
			dev_err(adev->dev, "Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v6_0_gart_enable(adev);
	if (r)
		return r;

	if (amdgpu_emu_mode == 1)
		return amdgpu_gmc_vram_checking(adev);

	return 0;
}

static int gmc_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v6_0_gart_disable(adev);

	return 0;
}

static int gmc_v6_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v6_0_hw_fini(adev);

	return 0;
}

static int gmc_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v6_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v6_0_wait_for_idle(void *handle)
{
	unsigned int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (gmc_v6_0_is_idle(handle))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int gmc_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v6_0_mc_stop(adev);
		if (gmc_v6_0_wait_for_idle(adev))
			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		gmc_v6_0_mc_resume(adev);
		udelay(50);
	}

	return 0;
}

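/**
 * gmc_v6_0_vm_fault_interrupt_state - enable/disable VM fault interrupts
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source this callback is registered for
 * @type: unused, there is a single VM fault source
 * @state: AMDGPU_IRQ_STATE_ENABLE or AMDGPU_IRQ_STATE_DISABLE
 *
 * Sets or clears the protection fault interrupt enable bits in both
 * VM_CONTEXT0_CNTL and VM_CONTEXT1_CNTL.
 */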
static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned int type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

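/**
 * gmc_v6_0_process_interrupt - handle a VM protection fault interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source the IV entry came from
 * @entry: decoded interrupt vector entry
 *
 * Reads the fault address/status registers and clears the fault,
 * optionally disables further fault reporting when amdgpu_vm_fault_stop
 * is set to stop on the first fault, and rate-limit logs the decoded
 * fault.
 */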
static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v6_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
			addr);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
	}

	return 0;
}

static int gmc_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int gmc_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
	.name = "gmc_v6_0",
	.early_init = gmc_v6_0_early_init,
	.late_init = gmc_v6_0_late_init,
	.sw_init = gmc_v6_0_sw_init,
	.sw_fini = gmc_v6_0_sw_fini,
	.hw_init = gmc_v6_0_hw_init,
	.hw_fini = gmc_v6_0_hw_fini,
	.suspend = gmc_v6_0_suspend,
	.resume = gmc_v6_0_resume,
	.is_idle = gmc_v6_0_is_idle,
	.wait_for_idle = gmc_v6_0_wait_for_idle,
	.soft_reset = gmc_v6_0_soft_reset,
	.set_clockgating_state = gmc_v6_0_set_clockgating_state,
	.set_powergating_state = gmc_v6_0_set_powergating_state,
};

static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v6_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
	.set_prt = gmc_v6_0_set_prt,
	.get_vm_pde = gmc_v6_0_get_vm_pde,
	.get_vm_pte = gmc_v6_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v6_0_get_vbios_fb_size,
};

static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
	.set = gmc_v6_0_vm_fault_interrupt_state,
	.process = gmc_v6_0_process_interrupt,
};

static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
}

static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v6_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v6_0_ip_funcs,
};
v6.13.7
   1/*
   2 * Copyright 2014 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23
  24#include <linux/firmware.h>
  25#include <linux/module.h>
  26#include <linux/pci.h>
  27
  28#include <drm/drm_cache.h>
  29#include "amdgpu.h"
  30#include "gmc_v6_0.h"
  31#include "amdgpu_ucode.h"
  32#include "amdgpu_gem.h"
  33
  34#include "bif/bif_3_0_d.h"
  35#include "bif/bif_3_0_sh_mask.h"
  36#include "oss/oss_1_0_d.h"
  37#include "oss/oss_1_0_sh_mask.h"
  38#include "gmc/gmc_6_0_d.h"
  39#include "gmc/gmc_6_0_sh_mask.h"
  40#include "dce/dce_6_0_d.h"
  41#include "dce/dce_6_0_sh_mask.h"
  42#include "si_enums.h"
  43
  44static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
  45static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
  46static int gmc_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block);
  47
  48MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
  49MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
  50MODULE_FIRMWARE("amdgpu/verde_mc.bin");
  51MODULE_FIRMWARE("amdgpu/oland_mc.bin");
  52MODULE_FIRMWARE("amdgpu/hainan_mc.bin");
  53MODULE_FIRMWARE("amdgpu/si58_mc.bin");
  54
  55#define MC_SEQ_MISC0__MT__MASK   0xf0000000
  56#define MC_SEQ_MISC0__MT__GDDR1  0x10000000
  57#define MC_SEQ_MISC0__MT__DDR2   0x20000000
  58#define MC_SEQ_MISC0__MT__GDDR3  0x30000000
  59#define MC_SEQ_MISC0__MT__GDDR4  0x40000000
  60#define MC_SEQ_MISC0__MT__GDDR5  0x50000000
  61#define MC_SEQ_MISC0__MT__HBM    0x60000000
  62#define MC_SEQ_MISC0__MT__DDR3   0xB0000000
  63
  64static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)
  65{
  66	u32 blackout;
  67	struct amdgpu_ip_block *ip_block;
  68
  69	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
  70	if (!ip_block)
  71		return;
  72
  73	gmc_v6_0_wait_for_idle(ip_block);
  74
  75	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
  76	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
  77		/* Block CPU access */
  78		WREG32(mmBIF_FB_EN, 0);
  79		/* blackout the MC */
  80		blackout = REG_SET_FIELD(blackout,
  81					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
  82		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
  83	}
  84	/* wait for the MC to settle */
  85	udelay(100);
  86
  87}
  88
  89static void gmc_v6_0_mc_resume(struct amdgpu_device *adev)
  90{
  91	u32 tmp;
  92
  93	/* unblackout the MC */
  94	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
  95	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
  96	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
  97	/* allow CPU access */
  98	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
  99	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
 100	WREG32(mmBIF_FB_EN, tmp);
 101}
 102
 103static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
 104{
 105	const char *chip_name;
 
 106	int err;
 
 107
 108	DRM_DEBUG("\n");
 109
 110	switch (adev->asic_type) {
 111	case CHIP_TAHITI:
 112		chip_name = "tahiti";
 113		break;
 114	case CHIP_PITCAIRN:
 115		chip_name = "pitcairn";
 116		break;
 117	case CHIP_VERDE:
 118		chip_name = "verde";
 119		break;
 120	case CHIP_OLAND:
 121		chip_name = "oland";
 122		break;
 123	case CHIP_HAINAN:
 124		chip_name = "hainan";
 125		break;
 126	default:
 127		BUG();
 128	}
 129
 130	/* this memory configuration requires special firmware */
 131	if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
 132		chip_name = "si58";
 133
 134	err = amdgpu_ucode_request(adev, &adev->gmc.fw, "amdgpu/%s_mc.bin", chip_name);
 
 
 
 
 135	if (err) {
 136		dev_err(adev->dev,
 137		       "si_mc: Failed to load firmware \"%s_mc.bin\"\n",
 138		       chip_name);
 139		amdgpu_ucode_release(&adev->gmc.fw);
 140	}
 141	return err;
 142}
 143
 144static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
 145{
 146	const __le32 *new_fw_data = NULL;
 147	u32 running;
 148	const __le32 *new_io_mc_regs = NULL;
 149	int i, regs_size, ucode_size;
 150	const struct mc_firmware_header_v1_0 *hdr;
 151
 152	if (!adev->gmc.fw)
 153		return -EINVAL;
 154
 155	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
 156
 157	amdgpu_ucode_print_mc_hdr(&hdr->header);
 158
 159	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
 160	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
 161	new_io_mc_regs = (const __le32 *)
 162		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
 163	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 164	new_fw_data = (const __le32 *)
 165		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 166
 167	running = RREG32(mmMC_SEQ_SUP_CNTL) & MC_SEQ_SUP_CNTL__RUN_MASK;
 168
 169	if (running == 0) {
 170
 171		/* reset the engine and set to writable */
 172		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
 173		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
 174
 175		/* load mc io regs */
 176		for (i = 0; i < regs_size; i++) {
 177			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
 178			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
 179		}
 180		/* load the MC ucode */
 181		for (i = 0; i < ucode_size; i++)
 182			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
 183
 184		/* put the engine back into the active state */
 185		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
 186		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
 187		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
 188
 189		/* wait for training to complete */
 190		for (i = 0; i < adev->usec_timeout; i++) {
 191			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D0_MASK)
 192				break;
 193			udelay(1);
 194		}
 195		for (i = 0; i < adev->usec_timeout; i++) {
 196			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D1_MASK)
 197				break;
 198			udelay(1);
 199		}
 200
 201	}
 202
 203	return 0;
 204}
 205
 206static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
 207				       struct amdgpu_gmc *mc)
 208{
 209	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
 210
 211	base <<= 24;
 212
 213	amdgpu_gmc_set_agp_default(adev, mc);
 214	amdgpu_gmc_vram_location(adev, mc, base);
 215	amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
 216}
 217
 218static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
 219{
 220	int i, j;
 221	struct amdgpu_ip_block *ip_block;
 222
 223
 224	/* Initialize HDP */
 225	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
 226		WREG32((0xb05 + j), 0x00000000);
 227		WREG32((0xb06 + j), 0x00000000);
 228		WREG32((0xb07 + j), 0x00000000);
 229		WREG32((0xb08 + j), 0x00000000);
 230		WREG32((0xb09 + j), 0x00000000);
 231	}
 232	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
 233
 234	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
 235	if (!ip_block)
 236		return;
 237
 238	if (gmc_v6_0_wait_for_idle(ip_block))
 239		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
 240
 241	if (adev->mode_info.num_crtc) {
 242		u32 tmp;
 243
 244		/* Lockout access through VGA aperture*/
 245		tmp = RREG32(mmVGA_HDP_CONTROL);
 246		tmp |= VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK;
 247		WREG32(mmVGA_HDP_CONTROL, tmp);
 248
 249		/* disable VGA render */
 250		tmp = RREG32(mmVGA_RENDER_CONTROL);
 251		tmp &= ~VGA_VSTATUS_CNTL;
 252		WREG32(mmVGA_RENDER_CONTROL, tmp);
 253	}
 254	/* Update configuration */
 255	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
 256	       adev->gmc.vram_start >> 12);
 257	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
 258	       adev->gmc.vram_end >> 12);
 259	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
 260	       adev->mem_scratch.gpu_addr >> 12);
 261	WREG32(mmMC_VM_AGP_BASE, 0);
 262	WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22);
 263	WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22);
 264
 265	if (gmc_v6_0_wait_for_idle(ip_block))
 266		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
 267}
 268
 269static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
 270{
 271
 272	u32 tmp;
 273	int chansize, numchan;
 274	int r;
 275
 276	tmp = RREG32(mmMC_ARB_RAMCFG);
 277	if (tmp & (1 << 11))
 278		chansize = 16;
 279	else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK)
 280		chansize = 64;
 281	else
 282		chansize = 32;
 283
 284	tmp = RREG32(mmMC_SHARED_CHMAP);
 285	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
 286	case 0:
 287	default:
 288		numchan = 1;
 289		break;
 290	case 1:
 291		numchan = 2;
 292		break;
 293	case 2:
 294		numchan = 4;
 295		break;
 296	case 3:
 297		numchan = 8;
 298		break;
 299	case 4:
 300		numchan = 3;
 301		break;
 302	case 5:
 303		numchan = 6;
 304		break;
 305	case 6:
 306		numchan = 10;
 307		break;
 308	case 7:
 309		numchan = 12;
 310		break;
 311	case 8:
 312		numchan = 16;
 313		break;
 314	}
 315	adev->gmc.vram_width = numchan * chansize;
 316	/* size in MB on si */
 317	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 318	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 319
 320	if (!(adev->flags & AMD_IS_APU)) {
 321		r = amdgpu_device_resize_fb_bar(adev);
 322		if (r)
 323			return r;
 324	}
 325	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
 326	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 327	adev->gmc.visible_vram_size = adev->gmc.aper_size;
 328
 329	/* set the gart size */
 330	if (amdgpu_gart_size == -1) {
 331		switch (adev->asic_type) {
 332		case CHIP_HAINAN:    /* no MM engines */
 333		default:
 334			adev->gmc.gart_size = 256ULL << 20;
 335			break;
 336		case CHIP_VERDE:    /* UVD, VCE do not support GPUVM */
 337		case CHIP_TAHITI:   /* UVD, VCE do not support GPUVM */
 338		case CHIP_PITCAIRN: /* UVD, VCE do not support GPUVM */
 339		case CHIP_OLAND:    /* UVD, VCE do not support GPUVM */
 340			adev->gmc.gart_size = 1024ULL << 20;
 341			break;
 342		}
 343	} else {
 344		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
 345	}
 346
 347	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
 348	gmc_v6_0_vram_gtt_location(adev, &adev->gmc);
 349
 350	return 0;
 351}
 352
 353static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 354					uint32_t vmhub, uint32_t flush_type)
 355{
 356	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 357}
 358
 359static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 360					    unsigned int vmid, uint64_t pd_addr)
 361{
 362	uint32_t reg;
 363
 364	/* write new base address */
 365	if (vmid < 8)
 366		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
 367	else
 368		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8);
 369	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
 370
 371	/* bits 0-15 are the VM contexts0-15 */
 372	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
 373
 374	return pd_addr;
 375}
 376
 377static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
 378				uint64_t *addr, uint64_t *flags)
 379{
 380	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
 381}
 382
 383static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev,
 384				struct amdgpu_bo_va_mapping *mapping,
 385				uint64_t *flags)
 386{
 387	*flags &= ~AMDGPU_PTE_EXECUTABLE;
 388	*flags &= ~AMDGPU_PTE_PRT;
 389}
 390
 391static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
 392					      bool value)
 393{
 394	u32 tmp;
 395
 396	tmp = RREG32(mmVM_CONTEXT1_CNTL);
 397	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 398			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 399	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 400			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 401	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 402			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 403	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 404			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 405	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 406			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 407	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 408			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 409	WREG32(mmVM_CONTEXT1_CNTL, tmp);
 410}
 411
 412 /**
 413  * gmc_v8_0_set_prt() - set PRT VM fault
 414  *
 415  * @adev: amdgpu_device pointer
 416  * @enable: enable/disable VM fault handling for PRT
 417  */
 418static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
 419{
 420	u32 tmp;
 421
 422	if (enable && !adev->gmc.prt_warning) {
 423		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
 424		adev->gmc.prt_warning = true;
 425	}
 426
 427	tmp = RREG32(mmVM_PRT_CNTL);
 428	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 429			    CB_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
 430			    enable);
 431	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 432			    TC_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
 433			    enable);
 434	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 435			    L2_CACHE_STORE_INVALID_ENTRIES,
 436			    enable);
 437	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 438			    L1_TLB_STORE_INVALID_ENTRIES,
 439			    enable);
 440	WREG32(mmVM_PRT_CNTL, tmp);
 441
 442	if (enable) {
 443		uint32_t low = AMDGPU_VA_RESERVED_BOTTOM >>
 444			AMDGPU_GPU_PAGE_SHIFT;
 445		uint32_t high = adev->vm_manager.max_pfn -
 446			(AMDGPU_VA_RESERVED_TOP >> AMDGPU_GPU_PAGE_SHIFT);
 447
 448		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
 449		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
 450		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
 451		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
 452		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
 453		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
 454		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
 455		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
 456	} else {
 457		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
 458		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
 459		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
 460		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
 461		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
 462		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
 463		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
 464		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
 465	}
 466}
 467
 468static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
 469{
 470	uint64_t table_addr;
 471	u32 field;
 472	int i;
 473
 474	if (adev->gart.bo == NULL) {
 475		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
 476		return -EINVAL;
 477	}
 478	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
 479
 480	table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
 481
 482	/* Setup TLB control */
 483	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
 484	       (0xA << 7) |
 485	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK |
 486	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_FRAGMENT_PROCESSING_MASK |
 487	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
 488	       MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK |
 489	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
 490	/* Setup L2 cache */
 491	WREG32(mmVM_L2_CNTL,
 492	       VM_L2_CNTL__ENABLE_L2_CACHE_MASK |
 493	       VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK |
 494	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
 495	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
 496	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
 497	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
 498	WREG32(mmVM_L2_CNTL2,
 499	       VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK |
 500	       VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK);
 501
 502	field = adev->vm_manager.fragment_size;
 503	WREG32(mmVM_L2_CNTL3,
 504	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
 505	       (field << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
 506	       (field << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
 507	/* setup context0 */
 508	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
 509	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
 510	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
 511	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 512			(u32)(adev->dummy_page_addr >> 12));
 513	WREG32(mmVM_CONTEXT0_CNTL2, 0);
 514	WREG32(mmVM_CONTEXT0_CNTL,
 515	       VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK |
 516	       (0UL << VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
 517	       VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
 518
 519	WREG32(0x575, 0);
 520	WREG32(0x576, 0);
 521	WREG32(0x577, 0);
 522
 523	/* empty context1-15 */
 524	/* set vm size, must be a multiple of 4 */
 525	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
 526	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
 527	/* Assign the pt base to something valid for now; the pts used for
 528	 * the VMs are determined by the application and setup and assigned
 529	 * on the fly in the vm part of radeon_gart.c
 530	 */
 531	for (i = 1; i < AMDGPU_NUM_VMID; i++) {
 532		if (i < 8)
 533			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
 534			       table_addr >> 12);
 535		else
 536			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
 537			       table_addr >> 12);
 538	}
 539
 540	/* enable context1-15 */
 541	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
 542	       (u32)(adev->dummy_page_addr >> 12));
 543	WREG32(mmVM_CONTEXT1_CNTL2, 4);
 544	WREG32(mmVM_CONTEXT1_CNTL,
 545	       VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
 546	       (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
 547	       ((adev->vm_manager.block_size - 9)
 548	       << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
 549	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
 550		gmc_v6_0_set_fault_enable_default(adev, false);
 551	else
 552		gmc_v6_0_set_fault_enable_default(adev, true);
 553
 554	gmc_v6_0_flush_gpu_tlb(adev, 0, 0, 0);
 555	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
 556		 (unsigned int)(adev->gmc.gart_size >> 20),
 557		 (unsigned long long)table_addr);
 558	return 0;
 559}
 560
 561static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
 562{
 563	int r;
 564
 565	if (adev->gart.bo) {
 566		dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
 567		return 0;
 568	}
 569	r = amdgpu_gart_init(adev);
 570	if (r)
 571		return r;
 572	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
 573	adev->gart.gart_pte_flags = 0;
 574	return amdgpu_gart_table_vram_alloc(adev);
 575}
 576
 577static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
 578{
 579	/*unsigned i;
 580
 581	for (i = 1; i < 16; ++i) {
 582		uint32_t reg;
 583		if (i < 8)
 584			reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i ;
 585		else
 586			reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (i - 8);
 587		adev->vm_manager.saved_table_addr[i] = RREG32(reg);
 588	}*/
 589
 590	/* Disable all tables */
 591	WREG32(mmVM_CONTEXT0_CNTL, 0);
 592	WREG32(mmVM_CONTEXT1_CNTL, 0);
 593	/* Setup TLB control */
 594	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
 595	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
 596	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
 597	/* Setup L2 cache */
 598	WREG32(mmVM_L2_CNTL,
 599	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
 600	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
 601	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
 602	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
 603	WREG32(mmVM_L2_CNTL2, 0);
 604	WREG32(mmVM_L2_CNTL3,
 605	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
 606	       (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
 607}
 608
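/**
 * gmc_v6_0_vm_decode_fault - log a decoded VM protection fault
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: faulting page number
 * @mc_client: memory client ID, packed as four ASCII characters
 *
 * Extract the VMID, protection bits, memory client and read/write
 * direction from the fault status and print them via dev_err().
 */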
 609static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
 610				     u32 status, u32 addr, u32 mc_client)
 611{
 612	u32 mc_id;
 613	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
 614	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 615					PROTECTIONS);
 616	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
 617		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
 618
 619	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 620			      MEMORY_CLIENT_ID);
 621
 622	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
 623	       protections, vmid, addr,
 624	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 625			     MEMORY_CLIENT_RW) ?
 626	       "write" : "read", block, mc_client, mc_id);
 627}
 628
 629/*
 630static const u32 mc_cg_registers[] = {
 631	MC_HUB_MISC_HUB_CG,
 632	MC_HUB_MISC_SIP_CG,
 633	MC_HUB_MISC_VM_CG,
 634	MC_XPB_CLK_GAT,
 635	ATC_MISC_CG,
 636	MC_CITF_MISC_WR_CG,
 637	MC_CITF_MISC_RD_CG,
 638	MC_CITF_MISC_VM_CG,
 639	VM_L2_CG,
 640};
 641
 642static const u32 mc_cg_ls_en[] = {
 643	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
 644	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
 645	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
 646	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
 647	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
 648	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
 649	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
 650	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
 651	VM_L2_CG__MEM_LS_ENABLE_MASK,
 652};
 653
 654static const u32 mc_cg_en[] = {
 655	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
 656	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
 657	MC_HUB_MISC_VM_CG__ENABLE_MASK,
 658	MC_XPB_CLK_GAT__ENABLE_MASK,
 659	ATC_MISC_CG__ENABLE_MASK,
 660	MC_CITF_MISC_WR_CG__ENABLE_MASK,
 661	MC_CITF_MISC_RD_CG__ENABLE_MASK,
 662	MC_CITF_MISC_VM_CG__ENABLE_MASK,
 663	VM_L2_CG__ENABLE_MASK,
 664};
 665
 666static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev,
 667				  bool enable)
 668{
 669	int i;
 670	u32 orig, data;
 671
 672	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
 673		orig = data = RREG32(mc_cg_registers[i]);
 674		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
 675			data |= mc_cg_ls_en[i];
 676		else
 677			data &= ~mc_cg_ls_en[i];
 678		if (data != orig)
 679			WREG32(mc_cg_registers[i], data);
 680	}
 681}
 682
 683static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
 684				    bool enable)
 685{
 686	int i;
 687	u32 orig, data;
 688
 689	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
 690		orig = data = RREG32(mc_cg_registers[i]);
 691		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
 692			data |= mc_cg_en[i];
 693		else
 694			data &= ~mc_cg_en[i];
 695		if (data != orig)
 696			WREG32(mc_cg_registers[i], data);
 697	}
 698}
 699
 700static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
 701				     bool enable)
 702{
 703	u32 orig, data;
 704
 705	orig = data = RREG32_PCIE(ixPCIE_CNTL2);
 706
 707	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
 708		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
 709		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
 710		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
 711		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
 712	} else {
 713		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
 714		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
 715		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
 716		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
 717	}
 718
 719	if (orig != data)
 720		WREG32_PCIE(ixPCIE_CNTL2, data);
 721}
 722
 723static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
 724				     bool enable)
 725{
 726	u32 orig, data;
 727
 728	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
 729
 730	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
 731		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
 732	else
 733		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
 734
 735	if (orig != data)
 736		WREG32(mmHDP_HOST_PATH_CNTL, data);
 737}
 738
 739static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
 740				   bool enable)
 741{
 742	u32 orig, data;
 743
 744	orig = data = RREG32(mmHDP_MEM_POWER_LS);
 745
 746	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
 747		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
 748	else
 749		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
 750
 751	if (orig != data)
 752		WREG32(mmHDP_MEM_POWER_LS, data);
 753}
 754*/
 755
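/**
 * gmc_v6_0_convert_vram_type - translate the MC_SEQ_MISC0 memory type
 *
 * @mc_seq_vram_type: MT field sampled from MC_SEQ_MISC0
 *
 * Returns the matching AMDGPU_VRAM_TYPE_* value, or
 * AMDGPU_VRAM_TYPE_UNKNOWN for unrecognized encodings.
 */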
 756static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
 757{
 758	switch (mc_seq_vram_type) {
 759	case MC_SEQ_MISC0__MT__GDDR1:
 760		return AMDGPU_VRAM_TYPE_GDDR1;
 761	case MC_SEQ_MISC0__MT__DDR2:
 762		return AMDGPU_VRAM_TYPE_DDR2;
 763	case MC_SEQ_MISC0__MT__GDDR3:
 764		return AMDGPU_VRAM_TYPE_GDDR3;
 765	case MC_SEQ_MISC0__MT__GDDR4:
 766		return AMDGPU_VRAM_TYPE_GDDR4;
 767	case MC_SEQ_MISC0__MT__GDDR5:
 768		return AMDGPU_VRAM_TYPE_GDDR5;
 769	case MC_SEQ_MISC0__MT__DDR3:
 770		return AMDGPU_VRAM_TYPE_DDR3;
 771	default:
 772		return AMDGPU_VRAM_TYPE_UNKNOWN;
 773	}
 774}
 775
 776static int gmc_v6_0_early_init(struct amdgpu_ip_block *ip_block)
 777{
 778	struct amdgpu_device *adev = ip_block->adev;
 779
 780	gmc_v6_0_set_gmc_funcs(adev);
 781	gmc_v6_0_set_irq_funcs(adev);
 782
 783	return 0;
 784}
 785
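/**
 * gmc_v6_0_late_init - arm the VM fault interrupt
 *
 * @ip_block: pointer to this IP block instance
 *
 * Enable the VM fault interrupt unless amdgpu_vm_fault_stop is set to
 * AMDGPU_VM_FAULT_STOP_ALWAYS, in which case it is left disabled.
 */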
 786static int gmc_v6_0_late_init(struct amdgpu_ip_block *ip_block)
 787{
 788	struct amdgpu_device *adev = ip_block->adev;
 789
  790	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
  791		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
  792
  793	return 0;
 794}
 795
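/**
 * gmc_v6_0_get_vbios_fb_size - size of the VBIOS-reserved framebuffer
 *
 * @adev: amdgpu_device pointer
 *
 * If VGA mode is enabled, return the fixed VGA allocation; otherwise
 * compute the size from the active viewport, assuming 4 bytes per pixel.
 */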
 796static unsigned int gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
 797{
 798	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
 799	unsigned int size;
 800
 801	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
 802		size = AMDGPU_VBIOS_VGA_ALLOCATION;
 803	} else {
 804		u32 viewport = RREG32(mmVIEWPORT_SIZE);
 805
 806		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
 807			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
 808			4);
 809	}
 810	return size;
 811}
 812
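/**
 * gmc_v6_0_sw_init - software-side setup for the GMC block
 *
 * @ip_block: pointer to this IP block instance
 *
 * Detect the VRAM type, register the two VM fault interrupt sources
 * (src_ids 146 and 147), size the VM space, set the 40-bit DMA mask,
 * load the MC microcode, initialize the memory controller and GART,
 * and bring up the VM manager.
 */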
 813static int gmc_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
 814{
 815	int r;
 816	struct amdgpu_device *adev = ip_block->adev;
 817
 818	set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
 819
 820	if (adev->flags & AMD_IS_APU) {
 821		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
 822	} else {
 823		u32 tmp = RREG32(mmMC_SEQ_MISC0);
 824
 825		tmp &= MC_SEQ_MISC0__MT__MASK;
 826		adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp);
 827	}
 828
 829	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
 830	if (r)
 831		return r;
 832
 833	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
 834	if (r)
 835		return r;
 836
 837	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);
 838
 839	adev->gmc.mc_mask = 0xffffffffffULL;
 840
 841	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
 842	if (r) {
 843		dev_warn(adev->dev, "No suitable DMA available.\n");
 844		return r;
 845	}
 846	adev->need_swiotlb = drm_need_swiotlb(40);
 847
 848	r = gmc_v6_0_init_microcode(adev);
 849	if (r) {
 850		dev_err(adev->dev, "Failed to load mc firmware!\n");
 851		return r;
 852	}
 853
 854	r = gmc_v6_0_mc_init(adev);
 855	if (r)
 856		return r;
 857
 858	amdgpu_gmc_get_vbios_allocations(adev);
 859
 860	r = amdgpu_bo_init(adev);
 861	if (r)
 862		return r;
 863
 864	r = gmc_v6_0_gart_init(adev);
 865	if (r)
 866		return r;
 867
 868	/*
 869	 * number of VMs
 870	 * VMID 0 is reserved for System
 871	 * amdgpu graphics/compute will use VMIDs 1-7
 872	 * amdkfd will use VMIDs 8-15
 873	 */
 874	adev->vm_manager.first_kfd_vmid = 8;
 875	amdgpu_vm_manager_init(adev);
 876
 877	/* base offset of vram pages */
 878	if (adev->flags & AMD_IS_APU) {
 879		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
 880
 881		tmp <<= 22;
 882		adev->vm_manager.vram_base_offset = tmp;
 883	} else {
 884		adev->vm_manager.vram_base_offset = 0;
 885	}
 886
 887	return 0;
 888}
 889
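/**
 * gmc_v6_0_sw_fini - undo gmc_v6_0_sw_init
 *
 * @ip_block: pointer to this IP block instance
 *
 * Release pending GEM objects, tear down the VM manager, free the
 * GART table VRAM and buffer state, and drop the MC firmware.
 */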
 890static int gmc_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
 891{
 892	struct amdgpu_device *adev = ip_block->adev;
 893
 894	amdgpu_gem_force_release(adev);
 895	amdgpu_vm_manager_fini(adev);
 896	amdgpu_gart_table_vram_free(adev);
 897	amdgpu_bo_fini(adev);
 898	amdgpu_ucode_release(&adev->gmc.fw);
 899
 900	return 0;
 901}
 902
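/**
 * gmc_v6_0_hw_init - hardware-side setup for the GMC block
 *
 * @ip_block: pointer to this IP block instance
 *
 * Program the memory controller, load the MC microcode on dGPUs (APUs
 * do not use a separate MC firmware) and enable the GART. Under
 * emulation (amdgpu_emu_mode == 1) the VRAM is additionally verified.
 */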
 903static int gmc_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
 904{
 905	int r;
 906	struct amdgpu_device *adev = ip_block->adev;
 907
 908	gmc_v6_0_mc_program(adev);
 909
 910	if (!(adev->flags & AMD_IS_APU)) {
 911		r = gmc_v6_0_mc_load_microcode(adev);
 912		if (r) {
 913			dev_err(adev->dev, "Failed to load MC firmware!\n");
 914			return r;
 915		}
 916	}
 917
 918	r = gmc_v6_0_gart_enable(adev);
 919	if (r)
 920		return r;
 921
 922	if (amdgpu_emu_mode == 1)
 923		return amdgpu_gmc_vram_checking(adev);
 924
 925	return 0;
 926}
 927
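/**
 * gmc_v6_0_hw_fini - hardware-side teardown for the GMC block
 *
 * @ip_block: pointer to this IP block instance
 *
 * Disable the VM fault interrupt and shut down the GART.
 */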
 928static int gmc_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
 929{
 930	struct amdgpu_device *adev = ip_block->adev;
 931
 932	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
 933	gmc_v6_0_gart_disable(adev);
 934
 935	return 0;
 936}
 937
 938static int gmc_v6_0_suspend(struct amdgpu_ip_block *ip_block)
 939{
  940	gmc_v6_0_hw_fini(ip_block);
  941
 942	return 0;
 943}
 944
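/**
 * gmc_v6_0_resume - restore GMC state after suspend
 *
 * @ip_block: pointer to this IP block instance
 *
 * Re-run gmc_v6_0_hw_init() and reset all VMIDs, since previous
 * VMID-to-page-table assignments do not survive re-initialization.
 */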
 945static int gmc_v6_0_resume(struct amdgpu_ip_block *ip_block)
 946{
 947	int r;
 948	struct amdgpu_device *adev = ip_block->adev;
 949
 950	r = gmc_v6_0_hw_init(ip_block);
 951	if (r)
 952		return r;
 953
 954	amdgpu_vmid_reset_all(adev);
 955
 956	return 0;
 957}
 958
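/**
 * gmc_v6_0_is_idle - check whether the memory controller is idle
 *
 * @handle: amdgpu_device pointer
 *
 * Returns true when none of the MC/VMC busy bits are set in SRBM_STATUS.
 */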
 959static bool gmc_v6_0_is_idle(void *handle)
 960{
 961	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 962
 963	u32 tmp = RREG32(mmSRBM_STATUS);
 964
 965	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
 966		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
 967		return false;
 968
 969	return true;
 970}
 971
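/**
 * gmc_v6_0_wait_for_idle - busy-wait for the memory controller to go idle
 *
 * @ip_block: pointer to this IP block instance
 *
 * Polls gmc_v6_0_is_idle() once per microsecond for up to
 * adev->usec_timeout iterations; returns 0 on success or -ETIMEDOUT.
 */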
 972static int gmc_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 973{
 974	unsigned int i;
 975	struct amdgpu_device *adev = ip_block->adev;
 976
 977	for (i = 0; i < adev->usec_timeout; i++) {
 978		if (gmc_v6_0_is_idle(adev))
 979			return 0;
 980		udelay(1);
 981	}
 982	return -ETIMEDOUT;
  984}
 985
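/**
 * gmc_v6_0_soft_reset - soft-reset a hung memory controller
 *
 * @ip_block: pointer to this IP block instance
 *
 * If SRBM_STATUS reports the VMC or MC busy, stop MC access, pulse the
 * corresponding SRBM_SOFT_RESET bits, and resume the MC. The MC reset
 * bit is never set on APUs.
 */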
 986static int gmc_v6_0_soft_reset(struct amdgpu_ip_block *ip_block)
 987{
 988	struct amdgpu_device *adev = ip_block->adev;
 989
 990	u32 srbm_soft_reset = 0;
 991	u32 tmp = RREG32(mmSRBM_STATUS);
 992
 993	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
 994		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
 995						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
 996
 997	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
 998		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
 999		if (!(adev->flags & AMD_IS_APU))
1000			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1001							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
1002	}
1003
1004	if (srbm_soft_reset) {
1005		gmc_v6_0_mc_stop(adev);
1006
1007		if (gmc_v6_0_wait_for_idle(ip_block))
 1008			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
1009
1010		tmp = RREG32(mmSRBM_SOFT_RESET);
1011		tmp |= srbm_soft_reset;
1012		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1013		WREG32(mmSRBM_SOFT_RESET, tmp);
1014		tmp = RREG32(mmSRBM_SOFT_RESET);
1015
1016		udelay(50);
1017
1018		tmp &= ~srbm_soft_reset;
1019		WREG32(mmSRBM_SOFT_RESET, tmp);
1020		tmp = RREG32(mmSRBM_SOFT_RESET);
1021
1022		udelay(50);
1023
1024		gmc_v6_0_mc_resume(adev);
1025		udelay(50);
1026	}
1027
1028	return 0;
1029}
1030
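/**
 * gmc_v6_0_vm_fault_interrupt_state - toggle VM fault interrupts
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source
 * @type: interrupt type (unused)
 * @state: AMDGPU_IRQ_STATE_ENABLE or AMDGPU_IRQ_STATE_DISABLE
 *
 * Set or clear all protection-fault interrupt-enable bits in both
 * VM_CONTEXT0_CNTL and VM_CONTEXT1_CNTL.
 */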
1031static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
1032					     struct amdgpu_irq_src *src,
1033					     unsigned int type,
1034					     enum amdgpu_interrupt_state state)
1035{
1036	u32 tmp;
1037	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1038		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1039		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1040		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1041		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1042		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
1043
1044	switch (state) {
1045	case AMDGPU_IRQ_STATE_DISABLE:
1046		tmp = RREG32(mmVM_CONTEXT0_CNTL);
1047		tmp &= ~bits;
1048		WREG32(mmVM_CONTEXT0_CNTL, tmp);
1049		tmp = RREG32(mmVM_CONTEXT1_CNTL);
1050		tmp &= ~bits;
1051		WREG32(mmVM_CONTEXT1_CNTL, tmp);
1052		break;
1053	case AMDGPU_IRQ_STATE_ENABLE:
1054		tmp = RREG32(mmVM_CONTEXT0_CNTL);
1055		tmp |= bits;
1056		WREG32(mmVM_CONTEXT0_CNTL, tmp);
1057		tmp = RREG32(mmVM_CONTEXT1_CNTL);
1058		tmp |= bits;
1059		WREG32(mmVM_CONTEXT1_CNTL, tmp);
1060		break;
1061	default:
1062		break;
1063	}
1064
1065	return 0;
1066}
1067
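/**
 * gmc_v6_0_process_interrupt - handle a VM protection fault interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: decoded interrupt vector entry
 *
 * Read and clear the latched fault address and status, optionally
 * disable further fault reporting on the first fault
 * (AMDGPU_VM_FAULT_STOP_FIRST), and print a rate-limited fault dump.
 */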
1068static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
1069				      struct amdgpu_irq_src *source,
1070				      struct amdgpu_iv_entry *entry)
1071{
1072	u32 addr, status;
1073
1074	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
1075	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
1076	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
1077
1078	if (!addr && !status)
1079		return 0;
1080
1081	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
1082		gmc_v6_0_set_fault_enable_default(adev, false);
1083
1084	if (printk_ratelimit()) {
1085		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
1086			entry->src_id, entry->src_data[0]);
1087		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
1088			addr);
1089		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1090			status);
1091		gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
1092	}
1093
1094	return 0;
1095}
1096
1097static int gmc_v6_0_set_clockgating_state(void *handle,
1098					  enum amd_clockgating_state state)
1099{
1100	return 0;
1101}
1102
1103static int gmc_v6_0_set_powergating_state(void *handle,
1104					  enum amd_powergating_state state)
1105{
1106	return 0;
1107}
1108
1109static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
1110	.name = "gmc_v6_0",
1111	.early_init = gmc_v6_0_early_init,
1112	.late_init = gmc_v6_0_late_init,
1113	.sw_init = gmc_v6_0_sw_init,
1114	.sw_fini = gmc_v6_0_sw_fini,
1115	.hw_init = gmc_v6_0_hw_init,
1116	.hw_fini = gmc_v6_0_hw_fini,
1117	.suspend = gmc_v6_0_suspend,
1118	.resume = gmc_v6_0_resume,
1119	.is_idle = gmc_v6_0_is_idle,
1120	.wait_for_idle = gmc_v6_0_wait_for_idle,
1121	.soft_reset = gmc_v6_0_soft_reset,
1122	.set_clockgating_state = gmc_v6_0_set_clockgating_state,
1123	.set_powergating_state = gmc_v6_0_set_powergating_state,
1124};
1125
1126static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
1127	.flush_gpu_tlb = gmc_v6_0_flush_gpu_tlb,
1128	.emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
1129	.set_prt = gmc_v6_0_set_prt,
1130	.get_vm_pde = gmc_v6_0_get_vm_pde,
1131	.get_vm_pte = gmc_v6_0_get_vm_pte,
1132	.get_vbios_fb_size = gmc_v6_0_get_vbios_fb_size,
1133};
1134
1135static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
1136	.set = gmc_v6_0_vm_fault_interrupt_state,
1137	.process = gmc_v6_0_process_interrupt,
1138};
1139
1140static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev)
1141{
1142	adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
1143}
1144
1145static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
1146{
1147	adev->gmc.vm_fault.num_types = 1;
1148	adev->gmc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
1149}
1150
1151const struct amdgpu_ip_block_version gmc_v6_0_ip_block = {
1152	.type = AMD_IP_BLOCK_TYPE_GMC,
1153	.major = 6,
1154	.minor = 0,
1155	.rev = 0,
1156	.funcs = &gmc_v6_0_ip_funcs,
1157};