v4.6
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "cikd.h"
#include "cik.h"
#include "gmc_v7_0.h"
#include "amdgpu_ucode.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("radeon/bonaire_mc.bin");
MODULE_FIRMWARE("radeon/hawaii_mc.bin");
MODULE_FIRMWARE("amdgpu/topaz_mc.bin");

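/*
 * The golden register tables below are consumed by
 * amdgpu_program_register_sequence() as (register offset, AND mask,
 * OR value) triplets: a full 0xffffffff mask writes the value directly,
 * otherwise the register is read-modify-written with the given mask.
 */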
static const u32 golden_settings_iceland_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_iceland_a11,
						 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
		break;
	default:
		break;
	}
}

/**
 * gmc_v7_0_mc_wait_for_idle - wait for MC idle callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the MC (memory controller) to be idle.
 * (evergreen+).
 * Returns 0 if the MC is idle, -1 if not.
 */
int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read the MC busy bits from SRBM_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & 0x1F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
		      struct amdgpu_mode_mc_save *save)
{
	u32 blackout;

	if (adev->mode_info.num_crtc)
		amdgpu_display_stop_mc_access(adev, save);

	amdgpu_asic_wait_for_mc_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
	}
	/* wait for the MC to settle */
	udelay(100);
}

void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
			struct amdgpu_mode_mc_save *save)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);

	if (adev->mode_info.num_crtc)
		amdgpu_display_resume_mc_access(adev, save);
}

/**
 * gmc_v7_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_TOPAZ:
		chip_name = "topaz";
		break;
	case CHIP_KAVERI:
	case CHIP_KABINI:
		return 0;
	default: BUG();
	}

	if (adev->asic_type == CHIP_TOPAZ)
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	else
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);

	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->mc.fw);

out:
	if (err) {
		printk(KERN_ERR
		       "cik_mc: Failed to load firmware \"%s\"\n",
		       fw_name);
		release_firmware(adev->mc.fw);
		adev->mc.fw = NULL;
	}
	return err;
}

/**
 * gmc_v7_0_mc_load_microcode - load MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (CIK).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running, blackout = 0;
	int i, ucode_size, regs_size;

	if (!adev->mc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

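	/*
	 * The MC firmware image carries two payloads: an io-debug section of
	 * (MC_SEQ_IO_DEBUG_INDEX, MC_SEQ_IO_DEBUG_DATA) register pairs, hence
	 * the size divided by (4 * 2) bytes per pair, and the ucode proper,
	 * counted in 32-bit dwords.
	 */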
	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
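		/*
		 * Note: the inner "if (running)" branches below can never
		 * execute, since running is known to be zero here; later
		 * kernels (see the v4.17 copy below) drop the blackout
		 * save/restore entirely.
		 */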
		if (running) {
			blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
			WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
		}

		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}

		if (running)
			WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}

static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_mc *mc)
{
	if (mc->mc_vram_size > 0xFFC0000000ULL) {
		/* leave room for at least 1024M GTT */
		dev_warn(adev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xFFC0000000ULL;
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	amdgpu_vram_location(adev, &adev->mc, 0);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
}

/**
 * gmc_v7_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (CIK).
 */
static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
{
	struct amdgpu_mode_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
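	/*
	 * The loop clears 32 blocks of five HDP registers at a 6-dword
	 * stride, starting at raw dword offset 0xb05 (no symbolic mm*
	 * names are used for these).
	 */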
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (adev->mode_info.num_crtc)
		amdgpu_display_set_vga_render_state(adev, false);

	gmc_v7_0_mc_stop(adev, &save);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->mc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->mc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
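	/*
	 * MC_VM_FB_LOCATION packs the framebuffer range as 16-bit fields in
	 * 16MB units: top (vram_end >> 24) in bits 31:16, base
	 * (vram_start >> 24) in bits 15:0.
	 */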
	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(mmMC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
	WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	gmc_v7_0_mc_resume(adev, &save);

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v7_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (CIK).
 * Returns 0 for success.
 */
static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	tmp = RREG32(mmMC_ARB_RAMCFG);
	if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(mmMC_SHARED_CHMAP);
	switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
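	/*
	 * NOOFCHAN is an encoded field rather than a raw count: the switch
	 * above decodes values 0-8 to 1/2/4/8/3/6/10/12/16 memory channels,
	 * so the non-monotonic mapping is intentional.
	 */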
	adev->mc.vram_width = numchan * chansize;
	/* could the aperture size report 0? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* size in MB; CONFIG_MEMSIZE reports MB on SI and newer */
	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	/* In case the PCI BAR is larger than the actual amount of vram */
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;

	/* unless the user has overridden it, set the gart
	 * size equal to 1024MB or the vram size, whichever is larger.
	 */
	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v7_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}

/*
 * GART
 * VMID 0 covers the physical GPU address space as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v7_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (CIK).
 */
static void gmc_v7_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* flush hdp cache */
	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);

	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

/**
 * gmc_v7_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v7_0_gart_set_pte_pde(struct amdgpu_device *adev,
				     void *cpu_pt_addr,
				     uint32_t gpu_page_idx,
				     uint64_t addr,
				     uint32_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

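	/*
	 * Each GART entry is a single 64-bit word: the mask below keeps the
	 * 4K-aligned physical address, and the low 12 bits carry the access
	 * flags.
	 */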
	value = addr & 0xFFFFFFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}

/**
 * gmc_v7_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v7_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v7_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (CIK).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;
	u32 tmp;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(0x575, 0);
	WREG32(0x576, 0);
	WREG32(0x577, 0);

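	/*
	 * The three raw dword offsets above (0x575-0x577) correspond to the
	 * byte addresses 0x15D4-0x15DC dumped in gmc_v7_0_print_status().
	 */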
	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    amdgpu_vm_block_size - 9);
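	/*
	 * The PAGE_TABLE_BLOCK_SIZE field appears to be encoded as
	 * log2(pages per page-table block) minus 9, i.e. in units of 512
	 * 4K pages; amdgpu_vm_block_size is the log2 page count per block.
	 */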
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v7_0_set_fault_enable_default(adev, false);
	else
		gmc_v7_0_set_fault_enable_default(adev, true);

	if (adev->asic_type == CHIP_KAVERI) {
		tmp = RREG32(mmCHUB_CONTROL);
		tmp &= ~BYPASS_VM;
		WREG32(mmCHUB_CONTROL, tmp);
	}

	gmc_v7_0_gart_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

static int gmc_v7_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v7_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (CIK).
 */
static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}

/**
 * gmc_v7_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (CIK).
 */
static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

/*
 * vm
 * VMID 0 covers the physical GPU address space as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
/**
 * gmc_v7_0_vm_init - cik vm init callback
 *
 * @adev: amdgpu_device pointer
 *
 * Inits cik specific vm parameters (number of VMs, base of vram for
 * VMIDs 1-15) (CIK).
 * Returns 0 for success.
 */
static int gmc_v7_0_vm_init(struct amdgpu_device *adev)
{
	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
		tmp <<= 22;
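		/* MC_VM_FB_OFFSET is in 4MB units, hence the 22-bit shift */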
		adev->vm_manager.vram_base_offset = tmp;
	} else
		adev->vm_manager.vram_base_offset = 0;

	return 0;
}

/**
 * gmc_v7_0_vm_fini - cik vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down any asic specific VM setup (CIK).
 */
static void gmc_v7_0_vm_fini(struct amdgpu_device *adev)
{
}

/**
 * gmc_v7_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 *
 * Print human readable fault information (CIK).
 */
static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

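	/* "block" above is the MC client tag: four ASCII characters packed
	 * big-endian into the MCCLIENT register, unpacked here into a
	 * printable string
	 */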
	printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
	       protections, vmid, addr,
	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     MEMORY_CLIENT_RW) ?
	       "write" : "read", block, mc_client, mc_id);
}

static const u32 mc_cg_registers[] = {
	mmMC_HUB_MISC_HUB_CG,
	mmMC_HUB_MISC_SIP_CG,
	mmMC_HUB_MISC_VM_CG,
	mmMC_XPB_CLK_GAT,
	mmATC_MISC_CG,
	mmMC_CITF_MISC_WR_CG,
	mmMC_CITF_MISC_RD_CG,
	mmMC_CITF_MISC_VM_CG,
	mmVM_L2_CG,
};

static const u32 mc_cg_ls_en[] = {
	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	VM_L2_CG__MEM_LS_ENABLE_MASK,
};

static const u32 mc_cg_en[] = {
	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
	MC_HUB_MISC_VM_CG__ENABLE_MASK,
	MC_XPB_CLK_GAT__ENABLE_MASK,
	ATC_MISC_CG__ENABLE_MASK,
	MC_CITF_MISC_WR_CG__ENABLE_MASK,
	MC_CITF_MISC_RD_CG__ENABLE_MASK,
	MC_CITF_MISC_VM_CG__ENABLE_MASK,
	VM_L2_CG__ENABLE_MASK,
};

static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev,
				  bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
			data |= mc_cg_ls_en[i];
		else
			data &= ~mc_cg_ls_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev,
				    bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
			data |= mc_cg_en[i];
		else
			data &= ~mc_cg_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
	} else {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
	}

	if (orig != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
	else
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);

	if (orig != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev,
				   bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
	else
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);

	if (orig != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static int gmc_v7_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_set_gart_funcs(adev);
	gmc_v7_0_set_irq_funcs(adev);

	return 0;
}

static int gmc_v7_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
	else
		return 0;
}

static int gmc_v7_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
	}

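	/* interrupt source IDs 146 and 147 are the two CIK VM protection
	 * fault sources; both are routed to the same vm_fault handler
	 */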
	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size for cayman and SI is 40 bits.
	 */
	adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
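	/* amdgpu_vm_size is in GB: 1GB = 2^30 bytes = 2^18 4K pages, hence
	 * the shift by 18 to convert to a page frame count
	 */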

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v7_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v7_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v7_0_gart_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v7_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static int gmc_v7_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		amdgpu_vm_manager_fini(adev);
		gmc_v7_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v7_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static int gmc_v7_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_init_golden_registers(adev);

	gmc_v7_0_mc_program(adev);

	if (!(adev->flags & AMD_IS_APU)) {
		r = gmc_v7_0_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v7_0_gart_enable(adev);
	if (r)
		return r;

	return r;
}

static int gmc_v7_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v7_0_gart_disable(adev);

	return 0;
}

static int gmc_v7_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		gmc_v7_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v7_0_hw_fini(adev);

	return 0;
}

static int gmc_v7_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v7_0_hw_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v7_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static bool gmc_v7_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v7_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read the MC busy bits from SRBM_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static void gmc_v7_0_print_status(void *handle)
{
	int i, j;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "GMC 7.x registers\n");
	dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
		RREG32(mmSRBM_STATUS));
	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
		RREG32(mmSRBM_STATUS2));

	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
	dev_info(adev->dev, "  MC_VM_MX_L1_TLB_CNTL=0x%08X\n",
		 RREG32(mmMC_VM_MX_L1_TLB_CNTL));
	dev_info(adev->dev, "  VM_L2_CNTL=0x%08X\n",
		 RREG32(mmVM_L2_CNTL));
	dev_info(adev->dev, "  VM_L2_CNTL2=0x%08X\n",
		 RREG32(mmVM_L2_CNTL2));
	dev_info(adev->dev, "  VM_L2_CNTL3=0x%08X\n",
		 RREG32(mmVM_L2_CNTL3));
	dev_info(adev->dev, "  VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_CNTL2=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_CNTL2));
	dev_info(adev->dev, "  VM_CONTEXT0_CNTL=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_CNTL));
	dev_info(adev->dev, "  0x15D4=0x%08X\n",
		 RREG32(0x575));
	dev_info(adev->dev, "  0x15D8=0x%08X\n",
		 RREG32(0x576));
	dev_info(adev->dev, "  0x15DC=0x%08X\n",
		 RREG32(0x577));
	dev_info(adev->dev, "  VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_CNTL2=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_CNTL2));
	dev_info(adev->dev, "  VM_CONTEXT1_CNTL=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_CNTL));
	for (i = 0; i < 16; i++) {
		if (i < 8)
			dev_info(adev->dev, "  VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
				 i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i));
		else
			dev_info(adev->dev, "  VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
				 i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8));
	}
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR));
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR));
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR));
	dev_info(adev->dev, "  MC_VM_FB_LOCATION=0x%08X\n",
		 RREG32(mmMC_VM_FB_LOCATION));
	dev_info(adev->dev, "  MC_VM_AGP_BASE=0x%08X\n",
		 RREG32(mmMC_VM_AGP_BASE));
	dev_info(adev->dev, "  MC_VM_AGP_TOP=0x%08X\n",
		 RREG32(mmMC_VM_AGP_TOP));
	dev_info(adev->dev, "  MC_VM_AGP_BOT=0x%08X\n",
		 RREG32(mmMC_VM_AGP_BOT));

	if (adev->asic_type == CHIP_KAVERI) {
		dev_info(adev->dev, "  CHUB_CONTROL=0x%08X\n",
			 RREG32(mmCHUB_CONTROL));
	}

	dev_info(adev->dev, "  HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n",
		 RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL));
	dev_info(adev->dev, "  HDP_NONSURFACE_BASE=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_BASE));
	dev_info(adev->dev, "  HDP_NONSURFACE_INFO=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_INFO));
	dev_info(adev->dev, "  HDP_NONSURFACE_SIZE=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_SIZE));
	dev_info(adev->dev, "  HDP_MISC_CNTL=0x%08X\n",
		 RREG32(mmHDP_MISC_CNTL));
	dev_info(adev->dev, "  HDP_HOST_PATH_CNTL=0x%08X\n",
		 RREG32(mmHDP_HOST_PATH_CNTL));

	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		dev_info(adev->dev, "  %d:\n", i);
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb05 + j, RREG32(0xb05 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb06 + j, RREG32(0xb06 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb07 + j, RREG32(0xb07 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb08 + j, RREG32(0xb08 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb09 + j, RREG32(0xb09 + j));
	}

	dev_info(adev->dev, "  BIF_FB_EN=0x%08X\n",
		 RREG32(mmBIF_FB_EN));
}

static int gmc_v7_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_mode_mc_save save;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v7_0_print_status((void *)adev);

		gmc_v7_0_mc_stop(adev, &save);
		if (gmc_v7_0_wait_for_idle(adev)) {
			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
		}

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		gmc_v7_0_mc_resume(adev, &save);
		udelay(50);

		gmc_v7_0_print_status((void *)adev);
	}

	return 0;
}

static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
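	/* only bit 0 is modified here (WREG32_P keeps the bits covered by
	 * the ~1 mask); setting it latches the clear of the fault address
	 * and status registers -- believed to be the
	 * CLEAR_PROTECTION_FAULT_STATUS_ADDR field
	 */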

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v7_0_set_fault_enable_default(adev, false);

	dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
		entry->src_id, entry->src_data);
	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		addr);
	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		status);
	gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);

	return 0;
}

static int gmc_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	if (!(adev->flags & AMD_IS_APU)) {
		gmc_v7_0_enable_mc_mgcg(adev, gate);
		gmc_v7_0_enable_mc_ls(adev, gate);
	}
	gmc_v7_0_enable_bif_mgls(adev, gate);
	gmc_v7_0_enable_hdp_mgcg(adev, gate);
	gmc_v7_0_enable_hdp_ls(adev, gate);

	return 0;
}

static int gmc_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
	.early_init = gmc_v7_0_early_init,
	.late_init = gmc_v7_0_late_init,
	.sw_init = gmc_v7_0_sw_init,
	.sw_fini = gmc_v7_0_sw_fini,
	.hw_init = gmc_v7_0_hw_init,
	.hw_fini = gmc_v7_0_hw_fini,
	.suspend = gmc_v7_0_suspend,
	.resume = gmc_v7_0_resume,
	.is_idle = gmc_v7_0_is_idle,
	.wait_for_idle = gmc_v7_0_wait_for_idle,
	.soft_reset = gmc_v7_0_soft_reset,
	.print_status = gmc_v7_0_print_status,
	.set_clockgating_state = gmc_v7_0_set_clockgating_state,
	.set_powergating_state = gmc_v7_0_set_powergating_state,
};

static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v7_0_gart_set_pte_pde,
};

static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
	.set = gmc_v7_0_vm_fault_interrupt_state,
	.process = gmc_v7_0_process_interrupt,
};

static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v7_0_gart_funcs;
}

static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
}
v4.17
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "cikd.h"
#include "cik.h"
#include "gmc_v7_0.h"
#include "amdgpu_ucode.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "amdgpu_atombios.h"

static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v7_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("radeon/bonaire_mc.bin");
MODULE_FIRMWARE("radeon/hawaii_mc.bin");
MODULE_FIRMWARE("amdgpu/topaz_mc.bin");

static const u32 golden_settings_iceland_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_iceland_a11,
							ARRAY_SIZE(golden_settings_iceland_a11));
		break;
	default:
		break;
	}
}

static void gmc_v7_0_mc_stop(struct amdgpu_device *adev)
{
	u32 blackout;

	gmc_v7_0_wait_for_idle((void *)adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
	}
	/* wait for the MC to settle */
	udelay(100);
}

static void gmc_v7_0_mc_resume(struct amdgpu_device *adev)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);
}

/**
 * gmc_v7_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_TOPAZ:
		chip_name = "topaz";
		break;
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		return 0;
	default: BUG();
	}

	if (adev->asic_type == CHIP_TOPAZ)
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	else
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);

	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gmc.fw);

out:
	if (err) {
		pr_err("cik_mc: Failed to load firmware \"%s\"\n", fw_name);
		release_firmware(adev->gmc.fw);
		adev->gmc.fw = NULL;
	}
	return err;
}

/**
 * gmc_v7_0_mc_load_microcode - load MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (CIK).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}
	}

	return 0;
}

static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
	base <<= 24;
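	/* the FB base field of MC_VM_FB_LOCATION is in 16MB units, hence
	 * the shift by 24 to get a byte address
	 */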

	amdgpu_device_vram_location(adev, &adev->gmc, base);
	amdgpu_device_gart_location(adev, mc);
}

/**
 * gmc_v7_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (CIK).
 */
static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
{
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (gmc_v7_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture */
		tmp = RREG32(mmVGA_HDP_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		WREG32(mmVGA_HDP_CONTROL, tmp);

		/* disable VGA render */
		tmp = RREG32(mmVGA_RENDER_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
		WREG32(mmVGA_RENDER_CONTROL, tmp);
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->gmc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->gmc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (gmc_v7_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v7_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (CIK).
 * Returns 0 for success.
 */
 318static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
 319{
 320	int r;
 
 321
 322	adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
 323	if (!adev->gmc.vram_width) {
 324		u32 tmp;
 325		int chansize, numchan;
 326
  327		/* Get VRAM information */
 328		tmp = RREG32(mmMC_ARB_RAMCFG);
 329		if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
 330			chansize = 64;
 331		} else {
 332			chansize = 32;
 333		}
 334		tmp = RREG32(mmMC_SHARED_CHMAP);
 335		switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
 336		case 0:
 337		default:
 338			numchan = 1;
 339			break;
 340		case 1:
 341			numchan = 2;
 342			break;
 343		case 2:
 344			numchan = 4;
 345			break;
 346		case 3:
 347			numchan = 8;
 348			break;
 349		case 4:
 350			numchan = 3;
 351			break;
 352		case 5:
 353			numchan = 6;
 354			break;
 355		case 6:
 356			numchan = 10;
 357			break;
 358		case 7:
 359			numchan = 12;
 360			break;
 361		case 8:
 362			numchan = 16;
 363			break;
 364		}
 365		adev->gmc.vram_width = numchan * chansize;
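		/*
		 * Example with assumed values: NOOFCHAN = 3 decodes to 8
		 * channels above, so a 64-bit channel size gives an
		 * effective bus width of 8 * 64 = 512 bits.
		 */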
 366	}
  367	/* CONFIG_MEMSIZE reports the VRAM size in MB; convert to bytes */
 368	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 369	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 370
 371	if (!(adev->flags & AMD_IS_APU)) {
 372		r = amdgpu_device_resize_fb_bar(adev);
 373		if (r)
 374			return r;
 375	}
 376	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
 377	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 378
 379#ifdef CONFIG_X86_64
 380	if (adev->flags & AMD_IS_APU) {
 381		adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
 382		adev->gmc.aper_size = adev->gmc.real_vram_size;
 383	}
 384#endif
 385
 386	/* In case the PCI BAR is larger than the actual amount of vram */
 387	adev->gmc.visible_vram_size = adev->gmc.aper_size;
 388	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
 389		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
 390
 391	/* set the gart size */
 392	if (amdgpu_gart_size == -1) {
 393		switch (adev->asic_type) {
 394		case CHIP_TOPAZ:     /* no MM engines */
 395		default:
 396			adev->gmc.gart_size = 256ULL << 20;
 397			break;
 398#ifdef CONFIG_DRM_AMDGPU_CIK
 399		case CHIP_BONAIRE: /* UVD, VCE do not support GPUVM */
 400		case CHIP_HAWAII:  /* UVD, VCE do not support GPUVM */
 401		case CHIP_KAVERI:  /* UVD, VCE do not support GPUVM */
 402		case CHIP_KABINI:  /* UVD, VCE do not support GPUVM */
 403		case CHIP_MULLINS: /* UVD, VCE do not support GPUVM */
 404			adev->gmc.gart_size = 1024ULL << 20;
 405			break;
 406#endif
 407		}
 408	} else {
 409		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
 410	}
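	/*
	 * Sketch of the sizing above: amdgpu_gart_size is specified in
	 * MB (note the << 20), so booting with amdgpu_gart_size=512
	 * would give a 512 MB GART, while the default of -1 selects
	 * 1 GB on the UVD/VCE parts and 256 MB on Topaz.
	 */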
 411
 412	gmc_v7_0_vram_gtt_location(adev, &adev->gmc);
 413
 414	return 0;
 415}
 416
 417/*
 418 * GART
 419 * VMID 0 is the physical GPU addresses as used by the kernel.
 420 * VMIDs 1-15 are used for userspace clients and are handled
 421 * by the amdgpu vm/hsa code.
 422 */
 423
 424/**
 425 * gmc_v7_0_flush_gpu_tlb - gart tlb flush callback
 426 *
 427 * @adev: amdgpu_device pointer
 428 * @vmid: vm instance to flush
 429 *
 430 * Flush the TLB for the requested page table (CIK).
 431 */
 432static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
 433{
  434	/* bits 0-15 are the VM contexts 0-15 */
 435	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 436}
 437
 438static uint64_t gmc_v7_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 439					    unsigned vmid, uint64_t pd_addr)
 440{
 441	uint32_t reg;
 442
 443	if (vmid < 8)
 444		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
 445	else
 446		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
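	/*
	 * The base-address registers are split into two banks of eight:
	 * e.g. vmid 3 selects VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + 3 and
	 * vmid 10 selects VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + 2.
	 */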
 447	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
 448
  449	/* bits 0-15 are the VM contexts 0-15 */
 450	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
 451
 452	return pd_addr;
 453}
 454
 455static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
 456					unsigned pasid)
 457{
 458	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
 459}
 460
 461/**
 462 * gmc_v7_0_set_pte_pde - update the page tables using MMIO
 463 *
 464 * @adev: amdgpu_device pointer
 465 * @cpu_pt_addr: cpu address of the page table
 466 * @gpu_page_idx: entry in the page table to update
 467 * @addr: dst addr to write into pte/pde
 468 * @flags: access flags
 469 *
 470 * Update the page tables using the CPU.
 471 */
 472static int gmc_v7_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
 473				 uint32_t gpu_page_idx, uint64_t addr,
 474				 uint64_t flags)
 475{
 476	void __iomem *ptr = (void *)cpu_pt_addr;
 477	uint64_t value;
 478
 479	value = addr & 0xFFFFFFFFFFFFF000ULL;
 480	value |= flags;
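	/*
	 * Example with hypothetical inputs: addr = 0x123456000 and
	 * read/write flags yield value = 0x123456000 | flags; the low
	 * 12 bits of addr are masked off since they carry the flag
	 * bits of a 4 KB page entry.
	 */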
 481	writeq(value, ptr + (gpu_page_idx * 8));
 482
 483	return 0;
 484}
 485
 486static uint64_t gmc_v7_0_get_vm_pte_flags(struct amdgpu_device *adev,
 487					  uint32_t flags)
 488{
 489	uint64_t pte_flag = 0;
 490
 491	if (flags & AMDGPU_VM_PAGE_READABLE)
 492		pte_flag |= AMDGPU_PTE_READABLE;
 493	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
 494		pte_flag |= AMDGPU_PTE_WRITEABLE;
 495	if (flags & AMDGPU_VM_PAGE_PRT)
 496		pte_flag |= AMDGPU_PTE_PRT;
 497
 498	return pte_flag;
 499}
 500
 501static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level,
 502				uint64_t *addr, uint64_t *flags)
 503{
 504	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
 505}
 506
 507/**
  508 * gmc_v7_0_set_fault_enable_default - update VM fault handling
 509 *
 510 * @adev: amdgpu_device pointer
 511 * @value: true redirects VM faults to the default page
 512 */
 513static void gmc_v7_0_set_fault_enable_default(struct amdgpu_device *adev,
 514					      bool value)
 515{
 516	u32 tmp;
 517
 518	tmp = RREG32(mmVM_CONTEXT1_CNTL);
 519	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 520			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 521	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 522			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 523	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 524			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 525	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 526			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 527	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 528			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 529	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 530			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 531	WREG32(mmVM_CONTEXT1_CNTL, tmp);
 532}
 533
 534/**
 535 * gmc_v7_0_set_prt - set PRT VM fault
 536 *
 537 * @adev: amdgpu_device pointer
 538 * @enable: enable/disable VM fault handling for PRT
 539 */
 540static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
 541{
 542	uint32_t tmp;
 543
 544	if (enable && !adev->gmc.prt_warning) {
 545		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
 546		adev->gmc.prt_warning = true;
 547	}
 548
 549	tmp = RREG32(mmVM_PRT_CNTL);
 550	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 551			    CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
 552	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 553			    CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
 554	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 555			    TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
 556	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 557			    TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
 558	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 559			    L2_CACHE_STORE_INVALID_ENTRIES, enable);
 560	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 561			    L1_TLB_STORE_INVALID_ENTRIES, enable);
 562	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
 563			    MASK_PDE0_FAULT, enable);
 564	WREG32(mmVM_PRT_CNTL, tmp);
 565
 566	if (enable) {
 567		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
 568		uint32_t high = adev->vm_manager.max_pfn -
 569			(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
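		/*
		 * The four apertures then span every page from low to
		 * high, i.e. the whole VM space minus one reserved
		 * window at each end; unmapped accesses inside that
		 * range no longer raise VM faults.
		 */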
 570
 571		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
 572		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
 573		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
 574		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
 575		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
 576		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
 577		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
 578		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
 579	} else {
 580		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
 581		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
 582		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
 583		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
 584		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
 585		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
 586		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
 587		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
 588	}
 589}
 590
 591/**
 592 * gmc_v7_0_gart_enable - gart enable
 593 *
 594 * @adev: amdgpu_device pointer
 595 *
 596 * This sets up the TLBs, programs the page tables for VMID0,
 597 * sets up the hw for VMIDs 1-15 which are allocated on
 598 * demand, and sets up the global locations for the LDS, GDS,
 599 * and GPUVM for FSA64 clients (CIK).
 600 * Returns 0 for success, errors for failure.
 601 */
 602static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
 603{
 604	int r, i;
 605	u32 tmp, field;
 606
 607	if (adev->gart.robj == NULL) {
 608		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
 609		return -EINVAL;
 610	}
 611	r = amdgpu_gart_table_vram_pin(adev);
 612	if (r)
 613		return r;
 614	/* Setup TLB control */
 615	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
 616	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
 617	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
 618	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
 619	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
 620	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
 621	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
 622	/* Setup L2 cache */
 623	tmp = RREG32(mmVM_L2_CNTL);
 624	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
 625	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
 626	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
 627	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
 628	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
 629	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
 630	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
 631	WREG32(mmVM_L2_CNTL, tmp);
 632	tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
 633	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
 634	WREG32(mmVM_L2_CNTL2, tmp);
 635
 636	field = adev->vm_manager.fragment_size;
 637	tmp = RREG32(mmVM_L2_CNTL3);
 638	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
 639	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
 640	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
 641	WREG32(mmVM_L2_CNTL3, tmp);
 642	/* setup context0 */
 643	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
 644	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
 645	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
 646	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 647			(u32)(adev->dummy_page_addr >> 12));
 648	WREG32(mmVM_CONTEXT0_CNTL2, 0);
 649	tmp = RREG32(mmVM_CONTEXT0_CNTL);
 650	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
 651	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
 652	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 653	WREG32(mmVM_CONTEXT0_CNTL, tmp);
 654
 655	WREG32(0x575, 0);
 656	WREG32(0x576, 0);
 657	WREG32(0x577, 0);
 658
  659	/* empty contexts 1-15 */
 660	/* FIXME start with 4G, once using 2 level pt switch to full
 661	 * vm size space
 662	 */
 663	/* set vm size, must be a multiple of 4 */
 664	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
 665	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
 666	for (i = 1; i < 16; i++) {
 667		if (i < 8)
 668			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
 669			       adev->gart.table_addr >> 12);
 670		else
 671			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
 672			       adev->gart.table_addr >> 12);
 673	}
 674
  675	/* enable contexts 1-15 */
 676	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
 677	       (u32)(adev->dummy_page_addr >> 12));
 678	WREG32(mmVM_CONTEXT1_CNTL2, 4);
 679	tmp = RREG32(mmVM_CONTEXT1_CNTL);
 680	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
 681	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
 682	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
 683			    adev->vm_manager.block_size - 9);
 684	WREG32(mmVM_CONTEXT1_CNTL, tmp);
 685	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
 686		gmc_v7_0_set_fault_enable_default(adev, false);
 687	else
 688		gmc_v7_0_set_fault_enable_default(adev, true);
 689
 690	if (adev->asic_type == CHIP_KAVERI) {
 691		tmp = RREG32(mmCHUB_CONTROL);
 692		tmp &= ~BYPASS_VM;
 693		WREG32(mmCHUB_CONTROL, tmp);
 694	}
 695
 696	gmc_v7_0_flush_gpu_tlb(adev, 0);
 697	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
 698		 (unsigned)(adev->gmc.gart_size >> 20),
 699		 (unsigned long long)adev->gart.table_addr);
 700	adev->gart.ready = true;
 701	return 0;
 702}
 703
 704static int gmc_v7_0_gart_init(struct amdgpu_device *adev)
 705{
 706	int r;
 707
 708	if (adev->gart.robj) {
  709		WARN(1, "PCIE GART already initialized\n");
 710		return 0;
 711	}
 712	/* Initialize common gart structure */
 713	r = amdgpu_gart_init(adev);
 714	if (r)
 715		return r;
 716	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
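	/*
	 * Each GART entry is an 8-byte PTE, so e.g. a 1 GB GART of 4 KB
	 * pages needs 262144 entries and a 2 MB page table in VRAM.
	 */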
 717	adev->gart.gart_pte_flags = 0;
 718	return amdgpu_gart_table_vram_alloc(adev);
 719}
 720
 721/**
 722 * gmc_v7_0_gart_disable - gart disable
 723 *
 724 * @adev: amdgpu_device pointer
 725 *
  726 * This disables all VM page tables (CIK).
 727 */
 728static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
 729{
 730	u32 tmp;
 731
 732	/* Disable all tables */
 733	WREG32(mmVM_CONTEXT0_CNTL, 0);
 734	WREG32(mmVM_CONTEXT1_CNTL, 0);
 735	/* Setup TLB control */
 736	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
 737	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
 738	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
 739	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
 740	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
 741	/* Setup L2 cache */
 742	tmp = RREG32(mmVM_L2_CNTL);
 743	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
 744	WREG32(mmVM_L2_CNTL, tmp);
 745	WREG32(mmVM_L2_CNTL2, 0);
 746	amdgpu_gart_table_vram_unpin(adev);
 747}
 748
 749/**
 750 * gmc_v7_0_gart_fini - vm fini callback
 751 *
 752 * @adev: amdgpu_device pointer
 753 *
 754 * Tears down the driver GART/VM setup (CIK).
 755 */
 756static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
 757{
 758	amdgpu_gart_table_vram_free(adev);
 759	amdgpu_gart_fini(adev);
 760}
 761
 762/**
 763 * gmc_v7_0_vm_decode_fault - print human readable fault info
 764 *
 765 * @adev: amdgpu_device pointer
 766 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 767 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
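 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 * @pasid: process address space id of the faulting process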
 768 *
 769 * Print human readable fault information (CIK).
 770 */
 771static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
 772				     u32 addr, u32 mc_client, unsigned pasid)
 773{
 774	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
 775	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 776					PROTECTIONS);
 777	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
 778		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
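	/*
	 * block[] unpacks the client id into a printable four-character
	 * tag, high byte first: a hypothetical mc_client of 0x54433030
	 * would decode to the string "TC00".
	 */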
 779	u32 mc_id;
 780
 781	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 782			      MEMORY_CLIENT_ID);
 783
 784	dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
 785	       protections, vmid, pasid, addr,
 786	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 787			     MEMORY_CLIENT_RW) ?
 788	       "write" : "read", block, mc_client, mc_id);
 789}
 790
 791
 792static const u32 mc_cg_registers[] = {
 793	mmMC_HUB_MISC_HUB_CG,
 794	mmMC_HUB_MISC_SIP_CG,
 795	mmMC_HUB_MISC_VM_CG,
 796	mmMC_XPB_CLK_GAT,
 797	mmATC_MISC_CG,
 798	mmMC_CITF_MISC_WR_CG,
 799	mmMC_CITF_MISC_RD_CG,
 800	mmMC_CITF_MISC_VM_CG,
 801	mmVM_L2_CG,
 802};
 803
 804static const u32 mc_cg_ls_en[] = {
 805	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
 806	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
 807	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
 808	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
 809	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
 810	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
 811	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
 812	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
 813	VM_L2_CG__MEM_LS_ENABLE_MASK,
 814};
 815
 816static const u32 mc_cg_en[] = {
 817	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
 818	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
 819	MC_HUB_MISC_VM_CG__ENABLE_MASK,
 820	MC_XPB_CLK_GAT__ENABLE_MASK,
 821	ATC_MISC_CG__ENABLE_MASK,
 822	MC_CITF_MISC_WR_CG__ENABLE_MASK,
 823	MC_CITF_MISC_RD_CG__ENABLE_MASK,
 824	MC_CITF_MISC_VM_CG__ENABLE_MASK,
 825	VM_L2_CG__ENABLE_MASK,
 826};
 827
 828static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev,
 829				  bool enable)
 830{
 831	int i;
 832	u32 orig, data;
 833
 834	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
 835		orig = data = RREG32(mc_cg_registers[i]);
 836		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
 837			data |= mc_cg_ls_en[i];
 838		else
 839			data &= ~mc_cg_ls_en[i];
 840		if (data != orig)
 841			WREG32(mc_cg_registers[i], data);
 842	}
 843}
 844
 845static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev,
 846				    bool enable)
 847{
 848	int i;
 849	u32 orig, data;
 850
 851	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
 852		orig = data = RREG32(mc_cg_registers[i]);
 853		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
 854			data |= mc_cg_en[i];
 855		else
 856			data &= ~mc_cg_en[i];
 857		if (data != orig)
 858			WREG32(mc_cg_registers[i], data);
 859	}
 860}
 861
 862static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev,
 863				     bool enable)
 864{
 865	u32 orig, data;
 866
 867	orig = data = RREG32_PCIE(ixPCIE_CNTL2);
 868
 869	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
 870		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
 871		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
 872		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
 873		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
 874	} else {
 875		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
 876		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
 877		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
 878		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
 879	}
 880
 881	if (orig != data)
 882		WREG32_PCIE(ixPCIE_CNTL2, data);
 883}
 884
 885static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev,
 886				     bool enable)
 887{
 888	u32 orig, data;
 889
 890	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
 891
 892	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
 893		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
 894	else
 895		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
 896
 897	if (orig != data)
 898		WREG32(mmHDP_HOST_PATH_CNTL, data);
 899}
 900
 901static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev,
 902				   bool enable)
 903{
 904	u32 orig, data;
 905
 906	orig = data = RREG32(mmHDP_MEM_POWER_LS);
 907
 908	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
 909		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
 910	else
 911		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
 912
 913	if (orig != data)
 914		WREG32(mmHDP_MEM_POWER_LS, data);
 915}
 916
 917static int gmc_v7_0_convert_vram_type(int mc_seq_vram_type)
 918{
 919	switch (mc_seq_vram_type) {
 920	case MC_SEQ_MISC0__MT__GDDR1:
 921		return AMDGPU_VRAM_TYPE_GDDR1;
 922	case MC_SEQ_MISC0__MT__DDR2:
 923		return AMDGPU_VRAM_TYPE_DDR2;
 924	case MC_SEQ_MISC0__MT__GDDR3:
 925		return AMDGPU_VRAM_TYPE_GDDR3;
 926	case MC_SEQ_MISC0__MT__GDDR4:
 927		return AMDGPU_VRAM_TYPE_GDDR4;
 928	case MC_SEQ_MISC0__MT__GDDR5:
 929		return AMDGPU_VRAM_TYPE_GDDR5;
 930	case MC_SEQ_MISC0__MT__HBM:
 931		return AMDGPU_VRAM_TYPE_HBM;
 932	case MC_SEQ_MISC0__MT__DDR3:
 933		return AMDGPU_VRAM_TYPE_DDR3;
 934	default:
 935		return AMDGPU_VRAM_TYPE_UNKNOWN;
 936	}
 937}
 938
 939static int gmc_v7_0_early_init(void *handle)
 940{
 941	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 942
 943	gmc_v7_0_set_gmc_funcs(adev);
 944	gmc_v7_0_set_irq_funcs(adev);
 945
 946	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
 947	adev->gmc.shared_aperture_end =
 948		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
 949	adev->gmc.private_aperture_start =
 950		adev->gmc.shared_aperture_end + 1;
 951	adev->gmc.private_aperture_end =
 952		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
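	/*
	 * The resulting layout is two adjacent 4 GB windows: the shared
	 * aperture at 0x2000000000000000-0x20000000FFFFFFFF and the
	 * private aperture at 0x2000000100000000-0x20000001FFFFFFFF.
	 */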
 953
 954	return 0;
 955}
 956
 957static int gmc_v7_0_late_init(void *handle)
 958{
 959	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 960
 961	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
 962		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
 963	else
 964		return 0;
 965}
 966
 967static int gmc_v7_0_sw_init(void *handle)
 968{
 969	int r;
 970	int dma_bits;
 971	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 972
 973	if (adev->flags & AMD_IS_APU) {
 974		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
 975	} else {
 976		u32 tmp = RREG32(mmMC_SEQ_MISC0);
 977		tmp &= MC_SEQ_MISC0__MT__MASK;
 978		adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp);
 979	}
 980
 981	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
 982	if (r)
 983		return r;
 984
 985	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
 986	if (r)
 987		return r;
 988
 989	/* Adjust VM size here.
  990	 * Currently set to 64GB ((1 << 24) 4k pages).
  991	 * Max GPUVM size for CIK is 40 bits.
 992	 */
 993	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);
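	/*
	 * Reading of the arguments above (assumed from this call site,
	 * not verified against amdgpu_vm_adjust_size()): a 64 GB VM
	 * space, default fragment size 9 (2^9 pages = 2 MB), a single
	 * page table level, and a 40-bit maximum address.
	 */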
 994
 995	/* Set the internal MC address mask
 996	 * This is the max address of the GPU's
 997	 * internal address space.
 998	 */
 999	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
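	/* i.e. 2^40 - 1: the MC can address at most 1 TB of physical space */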
1000
1001	adev->gmc.stolen_size = 256 * 1024;
1002
1003	/* set DMA mask + need_dma32 flags.
1004	 * PCIE - can handle 40-bits.
 1005	 * IGP - can handle 40-bits.
1006	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
1007	 */
1008	adev->need_dma32 = false;
1009	dma_bits = adev->need_dma32 ? 32 : 40;
1010	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
1011	if (r) {
1012		adev->need_dma32 = true;
1013		dma_bits = 32;
1014		pr_warn("amdgpu: No suitable DMA available\n");
1015	}
1016	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
1017	if (r) {
1018		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
1019		pr_warn("amdgpu: No coherent DMA available\n");
1020	}
1021	adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
1022
1023	r = gmc_v7_0_init_microcode(adev);
1024	if (r) {
 1025		DRM_ERROR("Failed to load MC firmware!\n");
1026		return r;
1027	}
1028
1029	r = gmc_v7_0_mc_init(adev);
1030	if (r)
1031		return r;
1032
1033	/* Memory manager */
1034	r = amdgpu_bo_init(adev);
1035	if (r)
1036		return r;
1037
1038	r = gmc_v7_0_gart_init(adev);
1039	if (r)
1040		return r;
1041
1042	/*
1043	 * number of VMs
1044	 * VMID 0 is reserved for System
1045	 * amdgpu graphics/compute will use VMIDs 1-7
1046	 * amdkfd will use VMIDs 8-15
1047	 */
1048	adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
1049	amdgpu_vm_manager_init(adev);
1050
1051	/* base offset of vram pages */
1052	if (adev->flags & AMD_IS_APU) {
1053		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
1054
1055		tmp <<= 22;
1056		adev->vm_manager.vram_base_offset = tmp;
1057	} else {
1058		adev->vm_manager.vram_base_offset = 0;
1059	}
1060
1061	return 0;
1062}
1063
1064static int gmc_v7_0_sw_fini(void *handle)
1065{
1066	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1067
1068	amdgpu_gem_force_release(adev);
1069	amdgpu_vm_manager_fini(adev);
1070	gmc_v7_0_gart_fini(adev);
1071	amdgpu_bo_fini(adev);
1072	release_firmware(adev->gmc.fw);
1073	adev->gmc.fw = NULL;
1074
1075	return 0;
1076}
1077
1078static int gmc_v7_0_hw_init(void *handle)
1079{
1080	int r;
1081	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1082
1083	gmc_v7_0_init_golden_registers(adev);
1084
1085	gmc_v7_0_mc_program(adev);
1086
1087	if (!(adev->flags & AMD_IS_APU)) {
1088		r = gmc_v7_0_mc_load_microcode(adev);
1089		if (r) {
1090			DRM_ERROR("Failed to load MC firmware!\n");
1091			return r;
1092		}
1093	}
1094
1095	r = gmc_v7_0_gart_enable(adev);
1096	if (r)
1097		return r;
1098
1099	return r;
1100}
1101
1102static int gmc_v7_0_hw_fini(void *handle)
1103{
1104	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1105
1106	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
1107	gmc_v7_0_gart_disable(adev);
1108
1109	return 0;
1110}
1111
1112static int gmc_v7_0_suspend(void *handle)
1113{
1114	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1115
1116	gmc_v7_0_hw_fini(adev);
1117
1118	return 0;
1119}
1120
1121static int gmc_v7_0_resume(void *handle)
1122{
1123	int r;
1124	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1125
1126	r = gmc_v7_0_hw_init(adev);
1127	if (r)
1128		return r;
1129
1130	amdgpu_vmid_reset_all(adev);
1131
1132	return 0;
1133}
1134
1135static bool gmc_v7_0_is_idle(void *handle)
1136{
1137	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1138	u32 tmp = RREG32(mmSRBM_STATUS);
1139
1140	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1141		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
1142		return false;
1143
1144	return true;
1145}
1146
1147static int gmc_v7_0_wait_for_idle(void *handle)
1148{
1149	unsigned i;
1150	u32 tmp;
1151	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1152
1153	for (i = 0; i < adev->usec_timeout; i++) {
1154		/* read MC_STATUS */
1155		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
1156					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1157					       SRBM_STATUS__MCC_BUSY_MASK |
1158					       SRBM_STATUS__MCD_BUSY_MASK |
1159					       SRBM_STATUS__VMC_BUSY_MASK);
1160		if (!tmp)
1161			return 0;
1162		udelay(1);
1163	}
1164	return -ETIMEDOUT;
1165
1166}
1167
1168static int gmc_v7_0_soft_reset(void *handle)
1169{
1170	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1171	u32 srbm_soft_reset = 0;
1172	u32 tmp = RREG32(mmSRBM_STATUS);
1173
1174	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
1175		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1176						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
1177
1178	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1179		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
1180		if (!(adev->flags & AMD_IS_APU))
1181			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1182							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
1183	}
1184
1185	if (srbm_soft_reset) {
1186		gmc_v7_0_mc_stop(adev);
1187		if (gmc_v7_0_wait_for_idle((void *)adev)) {
 1188			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
1189		}
1190
1191
1192		tmp = RREG32(mmSRBM_SOFT_RESET);
1193		tmp |= srbm_soft_reset;
1194		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1195		WREG32(mmSRBM_SOFT_RESET, tmp);
1196		tmp = RREG32(mmSRBM_SOFT_RESET);
1197
1198		udelay(50);
1199
1200		tmp &= ~srbm_soft_reset;
1201		WREG32(mmSRBM_SOFT_RESET, tmp);
1202		tmp = RREG32(mmSRBM_SOFT_RESET);
1203
1204		/* Wait a little for things to settle down */
1205		udelay(50);
1206
1207		gmc_v7_0_mc_resume(adev);
1208		udelay(50);
1209	}
1210
1211	return 0;
1212}
1213
1214static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
1215					     struct amdgpu_irq_src *src,
1216					     unsigned type,
1217					     enum amdgpu_interrupt_state state)
1218{
1219	u32 tmp;
1220	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1221		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1222		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1223		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1224		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1225		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
1226
1227	switch (state) {
1228	case AMDGPU_IRQ_STATE_DISABLE:
1229		/* system context */
1230		tmp = RREG32(mmVM_CONTEXT0_CNTL);
1231		tmp &= ~bits;
1232		WREG32(mmVM_CONTEXT0_CNTL, tmp);
1233		/* VMs */
1234		tmp = RREG32(mmVM_CONTEXT1_CNTL);
1235		tmp &= ~bits;
1236		WREG32(mmVM_CONTEXT1_CNTL, tmp);
1237		break;
1238	case AMDGPU_IRQ_STATE_ENABLE:
1239		/* system context */
1240		tmp = RREG32(mmVM_CONTEXT0_CNTL);
1241		tmp |= bits;
1242		WREG32(mmVM_CONTEXT0_CNTL, tmp);
1243		/* VMs */
1244		tmp = RREG32(mmVM_CONTEXT1_CNTL);
1245		tmp |= bits;
1246		WREG32(mmVM_CONTEXT1_CNTL, tmp);
1247		break;
1248	default:
1249		break;
1250	}
1251
1252	return 0;
1253}
1254
1255static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
1256				      struct amdgpu_irq_src *source,
1257				      struct amdgpu_iv_entry *entry)
1258{
1259	u32 addr, status, mc_client;
1260
1261	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
1262	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
1263	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
1264	/* reset addr and status */
1265	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
1266
1267	if (!addr && !status)
1268		return 0;
1269
1270	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
1271		gmc_v7_0_set_fault_enable_default(adev, false);
1272
1273	if (printk_ratelimit()) {
1274		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
1275			entry->src_id, entry->src_data[0]);
1276		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
1277			addr);
1278		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1279			status);
1280		gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client,
1281					 entry->pasid);
1282	}
1283
1284	return 0;
1285}
1286
1287static int gmc_v7_0_set_clockgating_state(void *handle,
1288					  enum amd_clockgating_state state)
1289{
1290	bool gate = false;
1291	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1292
1293	if (state == AMD_CG_STATE_GATE)
1294		gate = true;
1295
1296	if (!(adev->flags & AMD_IS_APU)) {
1297		gmc_v7_0_enable_mc_mgcg(adev, gate);
1298		gmc_v7_0_enable_mc_ls(adev, gate);
1299	}
1300	gmc_v7_0_enable_bif_mgls(adev, gate);
1301	gmc_v7_0_enable_hdp_mgcg(adev, gate);
1302	gmc_v7_0_enable_hdp_ls(adev, gate);
1303
1304	return 0;
1305}
1306
1307static int gmc_v7_0_set_powergating_state(void *handle,
1308					  enum amd_powergating_state state)
1309{
1310	return 0;
1311}
1312
1313static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
1314	.name = "gmc_v7_0",
1315	.early_init = gmc_v7_0_early_init,
1316	.late_init = gmc_v7_0_late_init,
1317	.sw_init = gmc_v7_0_sw_init,
1318	.sw_fini = gmc_v7_0_sw_fini,
1319	.hw_init = gmc_v7_0_hw_init,
1320	.hw_fini = gmc_v7_0_hw_fini,
1321	.suspend = gmc_v7_0_suspend,
1322	.resume = gmc_v7_0_resume,
1323	.is_idle = gmc_v7_0_is_idle,
1324	.wait_for_idle = gmc_v7_0_wait_for_idle,
1325	.soft_reset = gmc_v7_0_soft_reset,
1326	.set_clockgating_state = gmc_v7_0_set_clockgating_state,
1327	.set_powergating_state = gmc_v7_0_set_powergating_state,
1328};
1329
1330static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
1331	.flush_gpu_tlb = gmc_v7_0_flush_gpu_tlb,
1332	.emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
1333	.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
1334	.set_pte_pde = gmc_v7_0_set_pte_pde,
1335	.set_prt = gmc_v7_0_set_prt,
1336	.get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags,
1337	.get_vm_pde = gmc_v7_0_get_vm_pde
1338};
1339
1340static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
1341	.set = gmc_v7_0_vm_fault_interrupt_state,
1342	.process = gmc_v7_0_process_interrupt,
1343};
1344
1345static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev)
1346{
1347	if (adev->gmc.gmc_funcs == NULL)
1348		adev->gmc.gmc_funcs = &gmc_v7_0_gmc_funcs;
1349}
1350
1351static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
1352{
1353	adev->gmc.vm_fault.num_types = 1;
1354	adev->gmc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
1355}
1356
1357const struct amdgpu_ip_block_version gmc_v7_0_ip_block =
1358{
1359	.type = AMD_IP_BLOCK_TYPE_GMC,
1360	.major = 7,
1361	.minor = 0,
1362	.rev = 0,
1363	.funcs = &gmc_v7_0_ip_funcs,
1364};
1365
1366const struct amdgpu_ip_block_version gmc_v7_4_ip_block =
1367{
1368	.type = AMD_IP_BLOCK_TYPE_GMC,
1369	.major = 7,
1370	.minor = 4,
1371	.rev = 0,
1372	.funcs = &gmc_v7_0_ip_funcs,
1373};