/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v10_0.h"
#include "umc_v8_7.h"

#include "athub/athub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"

#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "nbio_v2_3.h"

#include "gfxhub_v2_0.h"
#include "gfxhub_v2_1.h"
#include "mmhub_v2_0.h"
#include "mmhub_v2_3.h"
#include "athub_v2_0.h"
#include "athub_v2_1.h"

static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *src,
					 unsigned int type,
					 enum amdgpu_interrupt_state state)
{
	return 0;
}

static int
gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *src, unsigned int type,
				   enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false);
		/* GFX HUB */
		/* This works because this interrupt is only
		 * enabled at init/resume and disabled in
		 * fini/suspend, so the overall state doesn't
		 * change over the course of suspend/resume.
		 */
		if (!adev->in_s0ix)
			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true);
		/* GFX HUB */
		/* This works because this interrupt is only
		 * enabled at init/resume and disabled in
		 * fini/suspend, so the overall state doesn't
		 * change over the course of suspend/resume.
		 */
		if (!adev->in_s0ix)
			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	uint32_t vmhub_index = entry->client_id == SOC15_IH_CLIENTID_VMC ?
			       AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	bool write_fault = !!(entry->src_data[1] & 0x20);
	struct amdgpu_task_info *task_info;
	uint32_t status = 0;
	u64 addr;

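	/* The faulting address arrives split across the IV entry:
	 * src_data[0] carries page-address bits 43:12 and the low nibble
	 * of src_data[1] carries bits 47:44 of the 48-bit GPU VA.
	 */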
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (retry_fault) {
		/* Returning 1 here also prevents sending the IV to the KFD */

		/* Process it only if it's the first fault for this address */
		if (entry->ih != &adev->irq.ih_soft &&
		    amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
					     entry->timestamp))
			return 1;

		/* Delegate it to a different ring if the hardware hasn't
		 * already done it.
		 */
		if (entry->ih == &adev->irq.ih) {
			amdgpu_irq_delegate(adev, entry, 8);
			return 1;
		}

		/* Try to handle the recoverable page faults by filling page
		 * tables
		 */
		if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr, write_fault))
			return 1;
	}

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if ((entry->vmid_src == AMDGPU_GFXHUB(0)) &&
		    (amdgpu_ip_version(adev, GC_HWIP, 0) <
		     IP_VERSION(10, 3, 0)))
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);

		amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status,
					     entry->vmid_src ? AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0));
	}

	if (!printk_ratelimit())
		return 0;

	dev_err(adev->dev,
		"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
		entry->vmid_src ? "mmhub" : "gfxhub",
		entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
	task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
	if (task_info) {
		dev_err(adev->dev,
			" in process %s pid %d thread %s pid %d\n",
			task_info->process_name, task_info->tgid,
			task_info->task_name, task_info->pid);
		amdgpu_vm_put_task_info(task_info);
	}

	dev_err(adev->dev, "  in page starting at address 0x%016llx from client 0x%x (%s)\n",
			addr, entry->client_id,
			soc15_ih_clientid_name[entry->client_id]);

	if (!amdgpu_sriov_vf(adev))
		hub->vmhub_funcs->print_l2_protection_fault_status(adev,
								   status);

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
	.set = gmc_v10_0_vm_fault_interrupt_state,
	.process = gmc_v10_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v10_0_ecc_funcs = {
	.set = gmc_v10_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v10_0_ecc_funcs;
	}
}

/**
 * gmc_v10_0_use_invalidate_semaphore - judge whether to use semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 */
static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
				       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB0(0)) &&
		(!amdgpu_sriov_vf(adev)));
}

static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info(
					struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

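	/* The per-VMID PASID mapping registers are laid out contiguously,
	 * so adding vmid to the VMID0 register offset selects the right
	 * instance.
	 */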
	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		     + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: vmhub type
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
					uint32_t vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
	/* Use register 17 for GART */
	const unsigned int eng = 17;
	unsigned char hub_ip = 0;
	u32 sem, req, ack;
	unsigned int i;
	u32 tmp;

	sem = hub->vm_inv_eng0_sem + hub->eng_distance * eng;
	req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
	ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
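	/* Each invalidation engine owns its own SEM/REQ/ACK registers,
	 * spaced eng_distance apart; engine 17's set therefore sits 17
	 * strides past engine 0's.
	 */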

	/* flush hdp cache */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	/* This is necessary for SRIOV as well as for GFXOFF to function
	 * properly under bare metal
	 */
	if (adev->gfx.kiq[0].ring.sched.ready && !adev->enable_mes &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
		amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
						 1 << vmid, GET_INST(GC, 0));
		return;
	}

	/* This path is needed before KIQ/MES/GFXOFF are set up */
	hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? GC_HWIP : MMHUB_HWIP;

	spin_lock(&adev->gmc.invalidate_lock);
	/*
	 * The GPU VM invalidate acknowledge state may be lost across a
	 * power-gating off cycle; as a workaround, acquire a semaphore
	 * before the invalidation and release it afterwards to avoid
	 * entering the power-gated state in between.
	 */

	/* TODO: The semaphore needs further debugging before it can be used for GFXHUB as well. */
	if (use_semaphore) {
		for (i = 0; i < adev->usec_timeout; i++) {
			/* a read return value of 1 means semaphore acquire */
			tmp = RREG32_RLC_NO_KIQ(sem, hub_ip);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (i >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	WREG32_RLC_NO_KIQ(req, inv_req, hub_ip);

	/*
	 * Issue a dummy read to wait for the ACK register to be cleared
	 * to avoid a false ACK due to the new fast GRBM interface.
	 */
	if ((vmhub == AMDGPU_GFXHUB(0)) &&
	    (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 3, 0)))
		RREG32_RLC_NO_KIQ(req, hub_ip);

	/* Wait for ACK with a delay. */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_RLC_NO_KIQ(ack, hub_ip);
		tmp &= 1 << vmid;
		if (tmp)
			break;

		udelay(1);
	}

	/* TODO: The semaphore needs further debugging before it can be used for GFXHUB as well. */
	if (use_semaphore)
		WREG32_RLC_NO_KIQ(sem, 0, hub_ip);

	spin_unlock(&adev->gmc.invalidate_lock);

	if (i >= adev->usec_timeout)
		dev_err(adev->dev, "Timeout waiting for VM flush hub: %d!\n",
			vmhub);
}

/**
 * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: Used with PACKET3_INVALIDATE_TLBS_ALL_HUB()
 * @inst: is used to select which instance of KIQ to use for the invalidation
 *
 * Flush the TLB for the requested pasid.
 */
static void gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					  uint16_t pasid, uint32_t flush_type,
					  bool all_hub, uint32_t inst)
{
	uint16_t queried;
	int vmid, i;

	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
		bool valid;

		valid = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
								  &queried);
		if (!valid || queried != pasid)
			continue;

		if (all_hub) {
			for_each_set_bit(i, adev->vmhubs_mask,
					 AMDGPU_MAX_VMHUBS)
				gmc_v10_0_flush_gpu_tlb(adev, vmid, i,
							flush_type);
		} else {
			gmc_v10_0_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0),
						flush_type);
		}
	}
}

static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					     unsigned int vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
	unsigned int eng = ring->vm_inv_eng;

	/*
	 * The GPU VM invalidate acknowledge state may be lost across a
	 * power-gating off cycle; as a workaround, acquire a semaphore
	 * before the invalidation and release it afterwards to avoid
	 * entering the power-gated state in between.
	 */

	/* TODO: The semaphore needs further debugging before it can be used for GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: The semaphore needs further debugging before it can be used for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}

static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
					 unsigned int pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* MES fw manages IH_VMID_x_LUT updating */
	if (ring->is_mes_queue)
		return;

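	/* Both LUTs hold one dword per VMID, so the register offset is
	 * simply the LUT base plus the VMID index.
	 */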
	if (ring->vm_hub == AMDGPU_GFXHUB(0))
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on NAVI 10:
 * 63:59 reserved
 * 58 reserved and for sienna_cichlid is used for MALL noalloc
 * 57 reserved
 * 56 F
 * 55 L
 * 54 reserved
 * 53:52 SW
 * 51 T
 * 50:48 mtype
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on NAVI 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */
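
/*
 * Illustrative sketch (hypothetical values, not hardware output): a valid,
 * snooped system page with MTYPE_NC at page frame number pfn would combine
 * as
 *   AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED |
 *   AMDGPU_PTE_MTYPE_NV10(MTYPE_NC) | (pfn << 12)
 * filling bits 0, 1, 2, 50:48 and 47:12 of the layout above.
 */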

static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	}
}

static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
				 struct amdgpu_bo_va_mapping *mapping,
				 uint64_t *flags)
{
	struct amdgpu_bo *bo = mapping->bo_va->base.bo;

	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);

	*flags &= ~AMDGPU_PTE_NOALLOC;
	*flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags |= AMDGPU_PTE_SNOOPED;
		*flags |= AMDGPU_PTE_LOG;
		*flags |= AMDGPU_PTE_SYSTEM;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if (bo && bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
			       AMDGPU_GEM_CREATE_EXT_COHERENT |
			       AMDGPU_GEM_CREATE_UNCACHED))
		*flags = (*flags & ~AMDGPU_PTE_MTYPE_NV10_MASK) |
			 AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
}

static unsigned int gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned int size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport;
		u32 pitch;

		viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
		pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
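		/* Active framebuffer size in bytes: viewport height times
		 * pitch, with the factor of 4 assuming 4 bytes per pixel
		 * (32bpp scanout).
		 */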
		size = (REG_GET_FIELD(viewport,
					HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
				4);
	}

	return size;
}

static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
	.map_mtype = gmc_v10_0_map_mtype,
	.get_vm_pde = gmc_v10_0_get_vm_pde,
	.get_vm_pte = gmc_v10_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size,
};

static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	if (adev->gmc.gmc_funcs == NULL)
		adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
}

static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
	case IP_VERSION(8, 7, 0):
		adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
		adev->umc.retire_unit = 1;
		adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
		adev->umc.ras = &umc_v8_7_ras;
		break;
	default:
		break;
	}
}

static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(2, 3, 0):
	case IP_VERSION(2, 4, 0):
	case IP_VERSION(2, 4, 1):
		adev->mmhub.funcs = &mmhub_v2_3_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v2_0_funcs;
		break;
	}
}

static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 7):
		adev->gfxhub.funcs = &gfxhub_v2_1_funcs;
		break;
	default:
		adev->gfxhub.funcs = &gfxhub_v2_0_funcs;
		break;
	}
}

static int gmc_v10_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_set_mmhub_funcs(adev);
	gmc_v10_0_set_gfxhub_funcs(adev);
	gmc_v10_0_set_gmc_funcs(adev);
	gmc_v10_0_set_irq_funcs(adev);
	gmc_v10_0_set_umc_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
	adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;

	return 0;
}

static int gmc_v10_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	base = adev->gfxhub.funcs->get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;

	amdgpu_gmc_set_agp_default(adev, mc);
	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
	if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1))
		amdgpu_gmc_agp_location(adev, mc);

	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);

	/* add the xgmi offset of the physical node */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v10_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* get_memsize() reports the VRAM size in MB */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
		adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	adev->gmc.visible_vram_size = adev->gmc.aper_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case IP_VERSION(10, 3, 1):   /* DCE SG support */
		case IP_VERSION(10, 3, 3):   /* DCE SG support */
		case IP_VERSION(10, 3, 6):   /* DCE SG support */
		case IP_VERSION(10, 3, 7):   /* DCE SG support */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v10_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "NAVI10 PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;

	return amdgpu_gart_table_vram_alloc(adev);
}

static int gmc_v10_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfxhub.funcs->init(adev);

	adev->mmhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	if ((adev->flags & AMD_IS_APU) && amdgpu_emu_mode == 1) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
		adev->gmc.vram_width = 64;
	} else if (amdgpu_emu_mode == 1) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_GDDR6;
		adev->gmc.vram_width = 1 * 128; /* numchan * chansize */
	} else {
		r = amdgpu_atomfirmware_get_vram_info(adev,
				&vram_width, &vram_type, &vram_vendor);
		adev->gmc.vram_width = vram_width;

		adev->gmc.vram_type = vram_type;
		adev->gmc.vram_vendor = vram_vendor;
	}

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(10, 3, 0):
		adev->gmc.mall_size = 128 * 1024 * 1024;
		break;
	case IP_VERSION(10, 3, 2):
		adev->gmc.mall_size = 96 * 1024 * 1024;
		break;
	case IP_VERSION(10, 3, 4):
		adev->gmc.mall_size = 32 * 1024 * 1024;
		break;
	case IP_VERSION(10, 3, 5):
		adev->gmc.mall_size = 16 * 1024 * 1024;
		break;
	default:
		adev->gmc.mall_size = 0;
		break;
	}

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 7):
		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
		/*
		 * To fulfill 4-level page support, the vm size is 256TB
		 * (48 bit, the maximum for Navi10/Navi14/Navi12), with a
		 * block size of 512 (9 bit).
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is the VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);

	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/*
	 * Set the internal MC address mask. This is the max address of the
	 * GPU's internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
		return r;
	}

	adev->need_swiotlb = drm_need_swiotlb(44);

	r = gmc_v10_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v10_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;

	amdgpu_vm_manager_init(adev);

	r = amdgpu_gmc_ras_sw_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * gmc_v10_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
}

static int gmc_v10_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v10_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
}

/**
 * gmc_v10_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);

	if (!adev->in_s0ix) {
		r = adev->gfxhub.funcs->gart_enable(adev);
		if (r)
			return r;
	}

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	adev->hdp.funcs->init_registers(adev);

	/* Flush HDP after it is initialized */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
		false : true;

	if (!adev->in_s0ix)
		adev->gfxhub.funcs->set_fault_enable_default(adev, value);
	adev->mmhub.funcs->set_fault_enable_default(adev, value);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);
	if (!adev->in_s0ix)
		gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned int)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	return 0;
}

static int gmc_v10_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	adev->gmc.flush_pasid_uses_kiq = !amdgpu_emu_mode;

	/* The sequence of these two function calls matters. */
	gmc_v10_0_init_golden_registers(adev);

	/*
	 * Harvestable groups in gc_utcl2 need to be programmed before any
	 * GFX block register setup within GMC, or else the system hangs
	 * when harvesting SA.
	 */
	if (!adev->in_s0ix && adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest)
		adev->gfxhub.funcs->utcl2_harvest(adev);

	r = gmc_v10_0_gart_enable(adev);
	if (r)
		return r;

	if (amdgpu_emu_mode == 1) {
		r = amdgpu_gmc_vram_checking(adev);
		if (r)
			return r;
	}

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	return 0;
}

/**
 * gmc_v10_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
{
	if (!adev->in_s0ix)
		adev->gfxhub.funcs->gart_disable(adev);
	adev->mmhub.funcs->gart_disable(adev);
}

static int gmc_v10_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_gart_disable(adev);

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

	if (adev->gmc.ecc_irq.funcs &&
		amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
		amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);

	return 0;
}

static int gmc_v10_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_hw_fini(adev);

	return 0;
}

static int gmc_v10_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v10_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v10_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v10. */
	return true;
}

static int gmc_v10_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v10. */
	return 0;
}

static int gmc_v10_0_soft_reset(void *handle)
{
	return 0;
}

static int gmc_v10_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * MMHUB failing to disconnect from DF when MMHUB clock gating is
	 * disabled is a new problem observed on DF 3.0.3; the same suspend
	 * sequence has not shown any issue on DF 3.0.2 series platforms.
	 */
	if (adev->in_s0ix &&
	    amdgpu_ip_version(adev, DF_HWIP, 0) > IP_VERSION(3, 0, 2)) {
		dev_dbg(adev->dev, "keep mmhub clock gating enabled for s0ix\n");
		return 0;
	}

	r = adev->mmhub.funcs->set_clockgating(adev, state);
	if (r)
		return r;

	if (amdgpu_ip_version(adev, ATHUB_HWIP, 0) >= IP_VERSION(2, 1, 0))
		return athub_v2_1_set_clockgating(adev, state);
	else
		return athub_v2_0_set_clockgating(adev, state);
}

static void gmc_v10_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 3) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 4))
		return;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	if (amdgpu_ip_version(adev, ATHUB_HWIP, 0) >= IP_VERSION(2, 1, 0))
		athub_v2_1_get_clockgating(adev, flags);
	else
		athub_v2_0_get_clockgating(adev, flags);
}

static int gmc_v10_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
	.name = "gmc_v10_0",
	.early_init = gmc_v10_0_early_init,
	.late_init = gmc_v10_0_late_init,
	.sw_init = gmc_v10_0_sw_init,
	.sw_fini = gmc_v10_0_sw_fini,
	.hw_init = gmc_v10_0_hw_init,
	.hw_fini = gmc_v10_0_hw_fini,
	.suspend = gmc_v10_0_suspend,
	.resume = gmc_v10_0_resume,
	.is_idle = gmc_v10_0_is_idle,
	.wait_for_idle = gmc_v10_0_wait_for_idle,
	.soft_reset = gmc_v10_0_soft_reset,
	.set_clockgating_state = gmc_v10_0_set_clockgating_state,
	.set_powergating_state = gmc_v10_0_set_powergating_state,
	.get_clockgating_state = gmc_v10_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v10_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v10_0_ip_funcs,
};
v6.8
   1/*
   2 * Copyright 2019 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23#include <linux/firmware.h>
  24#include <linux/pci.h>
  25
  26#include <drm/drm_cache.h>
  27
  28#include "amdgpu.h"
  29#include "amdgpu_atomfirmware.h"
  30#include "gmc_v10_0.h"
  31#include "umc_v8_7.h"
  32
  33#include "athub/athub_2_0_0_sh_mask.h"
  34#include "athub/athub_2_0_0_offset.h"
  35#include "dcn/dcn_2_0_0_offset.h"
  36#include "dcn/dcn_2_0_0_sh_mask.h"
  37#include "oss/osssys_5_0_0_offset.h"
  38#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
  39#include "navi10_enum.h"
  40
  41#include "soc15.h"
  42#include "soc15d.h"
  43#include "soc15_common.h"
  44
  45#include "nbio_v2_3.h"
  46
  47#include "gfxhub_v2_0.h"
  48#include "gfxhub_v2_1.h"
  49#include "mmhub_v2_0.h"
  50#include "mmhub_v2_3.h"
  51#include "athub_v2_0.h"
  52#include "athub_v2_1.h"
  53
  54static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,
  55					 struct amdgpu_irq_src *src,
  56					 unsigned int type,
  57					 enum amdgpu_interrupt_state state)
  58{
  59	return 0;
  60}
  61
  62static int
  63gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
  64				   struct amdgpu_irq_src *src, unsigned int type,
  65				   enum amdgpu_interrupt_state state)
  66{
  67	switch (state) {
  68	case AMDGPU_IRQ_STATE_DISABLE:
  69		/* MM HUB */
  70		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false);
  71		/* GFX HUB */
  72		/* This works because this interrupt is only
  73		 * enabled at init/resume and disabled in
  74		 * fini/suspend, so the overall state doesn't
  75		 * change over the course of suspend/resume.
  76		 */
  77		if (!adev->in_s0ix)
  78			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false);
  79		break;
  80	case AMDGPU_IRQ_STATE_ENABLE:
  81		/* MM HUB */
  82		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true);
  83		/* GFX HUB */
  84		/* This works because this interrupt is only
  85		 * enabled at init/resume and disabled in
  86		 * fini/suspend, so the overall state doesn't
  87		 * change over the course of suspend/resume.
  88		 */
  89		if (!adev->in_s0ix)
  90			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true);
  91		break;
  92	default:
  93		break;
  94	}
  95
  96	return 0;
  97}
  98
  99static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
 100				       struct amdgpu_irq_src *source,
 101				       struct amdgpu_iv_entry *entry)
 102{
 103	uint32_t vmhub_index = entry->client_id == SOC15_IH_CLIENTID_VMC ?
 104			       AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
 105	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
 106	bool retry_fault = !!(entry->src_data[1] & 0x80);
 107	bool write_fault = !!(entry->src_data[1] & 0x20);
 108	struct amdgpu_task_info task_info;
 109	uint32_t status = 0;
 110	u64 addr;
 111
 112	addr = (u64)entry->src_data[0] << 12;
 113	addr |= ((u64)entry->src_data[1] & 0xf) << 44;
 114
 115	if (retry_fault) {
 116		/* Returning 1 here also prevents sending the IV to the KFD */
 117
 118		/* Process it onyl if it's the first fault for this address */
 119		if (entry->ih != &adev->irq.ih_soft &&
 120		    amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
 121					     entry->timestamp))
 122			return 1;
 123
 124		/* Delegate it to a different ring if the hardware hasn't
 125		 * already done it.
 126		 */
 127		if (entry->ih == &adev->irq.ih) {
 128			amdgpu_irq_delegate(adev, entry, 8);
 129			return 1;
 130		}
 131
 132		/* Try to handle the recoverable page faults by filling page
 133		 * tables
 134		 */
 135		if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr, write_fault))
 136			return 1;
 137	}
 138
 139	if (!amdgpu_sriov_vf(adev)) {
 140		/*
 141		 * Issue a dummy read to wait for the status register to
 142		 * be updated to avoid reading an incorrect value due to
 143		 * the new fast GRBM interface.
 144		 */
 145		if ((entry->vmid_src == AMDGPU_GFXHUB(0)) &&
 146		    (amdgpu_ip_version(adev, GC_HWIP, 0) <
 147		     IP_VERSION(10, 3, 0)))
 148			RREG32(hub->vm_l2_pro_fault_status);
 149
 150		status = RREG32(hub->vm_l2_pro_fault_status);
 151		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
 152
 153		amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status,
 154					     entry->vmid_src ? AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0));
 155	}
 156
 157	if (!printk_ratelimit())
 158		return 0;
 159
 160	memset(&task_info, 0, sizeof(struct amdgpu_task_info));
 161	amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
 162
 163	dev_err(adev->dev,
 164		"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
 165		entry->vmid_src ? "mmhub" : "gfxhub",
 166		entry->src_id, entry->ring_id, entry->vmid,
 167		entry->pasid, task_info.process_name, task_info.tgid,
 168		task_info.task_name, task_info.pid);
 
 
 
 
 
 
 
 169	dev_err(adev->dev, "  in page starting at address 0x%016llx from client 0x%x (%s)\n",
 170		addr, entry->client_id,
 171		soc15_ih_clientid_name[entry->client_id]);
 172
 173	if (!amdgpu_sriov_vf(adev))
 174		hub->vmhub_funcs->print_l2_protection_fault_status(adev,
 175								   status);
 176
 177	return 0;
 178}
 179
 180static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
 181	.set = gmc_v10_0_vm_fault_interrupt_state,
 182	.process = gmc_v10_0_process_interrupt,
 183};
 184
 185static const struct amdgpu_irq_src_funcs gmc_v10_0_ecc_funcs = {
 186	.set = gmc_v10_0_ecc_interrupt_state,
 187	.process = amdgpu_umc_process_ecc_irq,
 188};
 189
 190static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
 191{
 192	adev->gmc.vm_fault.num_types = 1;
 193	adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;
 194
 195	if (!amdgpu_sriov_vf(adev)) {
 196		adev->gmc.ecc_irq.num_types = 1;
 197		adev->gmc.ecc_irq.funcs = &gmc_v10_0_ecc_funcs;
 198	}
 199}
 200
 201/**
 202 * gmc_v10_0_use_invalidate_semaphore - judge whether to use semaphore
 203 *
 204 * @adev: amdgpu_device pointer
 205 * @vmhub: vmhub type
 206 *
 207 */
 208static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
 209				       uint32_t vmhub)
 210{
 211	return ((vmhub == AMDGPU_MMHUB0(0)) &&
 212		(!amdgpu_sriov_vf(adev)));
 213}
 214
 215static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info(
 216					struct amdgpu_device *adev,
 217					uint8_t vmid, uint16_t *p_pasid)
 218{
 219	uint32_t value;
 220
 221	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
 222		     + vmid);
 223	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
 224
 225	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
 226}
 227
 228/*
 229 * GART
 230 * VMID 0 is the physical GPU addresses as used by the kernel.
 231 * VMIDs 1-15 are used for userspace clients and are handled
 232 * by the amdgpu vm/hsa code.
 233 */
 234
 235/**
 236 * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback
 237 *
 238 * @adev: amdgpu_device pointer
 239 * @vmid: vm instance to flush
 240 * @vmhub: vmhub type
 241 * @flush_type: the flush type
 242 *
 243 * Flush the TLB for the requested page table.
 244 */
 245static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 246					uint32_t vmhub, uint32_t flush_type)
 247{
 248	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
 249	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
 250	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
 251	/* Use register 17 for GART */
 252	const unsigned int eng = 17;
 253	unsigned char hub_ip = 0;
 254	u32 sem, req, ack;
 255	unsigned int i;
 256	u32 tmp;
 257
 258	sem = hub->vm_inv_eng0_sem + hub->eng_distance * eng;
 259	req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
 260	ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
 261
 262	/* flush hdp cache */
 263	adev->hdp.funcs->flush_hdp(adev, NULL);
 264
 265	/* For SRIOV run time, driver shouldn't access the register through MMIO
 266	 * Directly use kiq to do the vm invalidation instead
 267	 */
 268	if (adev->gfx.kiq[0].ring.sched.ready && !adev->enable_mes &&
 269	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
 270		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
 271				1 << vmid, GET_INST(GC, 0));
 272		return;
 273	}
 274
 
 275	hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? GC_HWIP : MMHUB_HWIP;
 276
 277	spin_lock(&adev->gmc.invalidate_lock);
 278	/*
 279	 * It may lose gpuvm invalidate acknowldege state across power-gating
 280	 * off cycle, add semaphore acquire before invalidation and semaphore
 281	 * release after invalidation to avoid entering power gated state
 282	 * to WA the Issue
 283	 */
 284
 285	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
 286	if (use_semaphore) {
 287		for (i = 0; i < adev->usec_timeout; i++) {
 288			/* a read return value of 1 means semaphore acuqire */
 289			tmp = RREG32_RLC_NO_KIQ(sem, hub_ip);
 290			if (tmp & 0x1)
 291				break;
 292			udelay(1);
 293		}
 294
 295		if (i >= adev->usec_timeout)
 296			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
 297	}
 298
 299	WREG32_RLC_NO_KIQ(req, inv_req, hub_ip);
 300
 301	/*
 302	 * Issue a dummy read to wait for the ACK register to be cleared
 303	 * to avoid a false ACK due to the new fast GRBM interface.
 304	 */
 305	if ((vmhub == AMDGPU_GFXHUB(0)) &&
 306	    (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 3, 0)))
 307		RREG32_RLC_NO_KIQ(req, hub_ip);
 308
 309	/* Wait for ACK with a delay.*/
 310	for (i = 0; i < adev->usec_timeout; i++) {
 311		tmp = RREG32_RLC_NO_KIQ(ack, hub_ip);
 312		tmp &= 1 << vmid;
 313		if (tmp)
 314			break;
 315
 316		udelay(1);
 317	}
 318
 319	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
 320	if (use_semaphore)
 321		WREG32_RLC_NO_KIQ(sem, 0, hub_ip);
 322
 323	spin_unlock(&adev->gmc.invalidate_lock);
 324
 325	if (i >= adev->usec_timeout)
 326		dev_err(adev->dev, "Timeout waiting for VM flush hub: %d!\n",
 327			vmhub);
 328}
 329
 330/**
 331 * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid
 332 *
 333 * @adev: amdgpu_device pointer
 334 * @pasid: pasid to be flush
 335 * @flush_type: the flush type
 336 * @all_hub: Used with PACKET3_INVALIDATE_TLBS_ALL_HUB()
 337 * @inst: is used to select which instance of KIQ to use for the invalidation
 338 *
 339 * Flush the TLB for the requested pasid.
 340 */
 341static void gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 342					  uint16_t pasid, uint32_t flush_type,
 343					  bool all_hub, uint32_t inst)
 344{
 345	uint16_t queried;
 346	int vmid, i;
 347
 348	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
 349		bool valid;
 350
 351		valid = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
 352								  &queried);
 353		if (!valid || queried != pasid)
 354			continue;
 355
 356		if (all_hub) {
 357			for_each_set_bit(i, adev->vmhubs_mask,
 358					 AMDGPU_MAX_VMHUBS)
 359				gmc_v10_0_flush_gpu_tlb(adev, vmid, i,
 360							flush_type);
 361		} else {
 362			gmc_v10_0_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0),
 363						flush_type);
 364		}
 365	}
 366}
 367
 368static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 369					     unsigned int vmid, uint64_t pd_addr)
 370{
 371	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
 372	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
 373	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
 374	unsigned int eng = ring->vm_inv_eng;
 375
 376	/*
 377	 * It may lose gpuvm invalidate acknowldege state across power-gating
 378	 * off cycle, add semaphore acquire before invalidation and semaphore
 379	 * release after invalidation to avoid entering power gated state
 380	 * to WA the Issue
 381	 */
 382
 383	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
 384	if (use_semaphore)
 385		/* a read return value of 1 means semaphore acuqire */
 386		amdgpu_ring_emit_reg_wait(ring,
 387					  hub->vm_inv_eng0_sem +
 388					  hub->eng_distance * eng, 0x1, 0x1);
 389
 390	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
 391			      (hub->ctx_addr_distance * vmid),
 392			      lower_32_bits(pd_addr));
 393
 394	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
 395			      (hub->ctx_addr_distance * vmid),
 396			      upper_32_bits(pd_addr));
 397
 398	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
 399					    hub->eng_distance * eng,
 400					    hub->vm_inv_eng0_ack +
 401					    hub->eng_distance * eng,
 402					    req, 1 << vmid);
 403
 404	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
 405	if (use_semaphore)
 406		/*
 407		 * add semaphore release after invalidation,
 408		 * write with 0 means semaphore release
 409		 */
 410		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
 411				      hub->eng_distance * eng, 0);
 412
 413	return pd_addr;
 414}
 415
 416static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
 417					 unsigned int pasid)
 418{
 419	struct amdgpu_device *adev = ring->adev;
 420	uint32_t reg;
 421
 422	/* MES fw manages IH_VMID_x_LUT updating */
 423	if (ring->is_mes_queue)
 424		return;
 425
 426	if (ring->vm_hub == AMDGPU_GFXHUB(0))
 427		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
 428	else
 429		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
 430
 431	amdgpu_ring_emit_wreg(ring, reg, pasid);
 432}
 433
 434/*
 435 * PTE format on NAVI 10:
 436 * 63:59 reserved
 437 * 58 reserved and for sienna_cichlid is used for MALL noalloc
 438 * 57 reserved
 439 * 56 F
 440 * 55 L
 441 * 54 reserved
 442 * 53:52 SW
 443 * 51 T
 444 * 50:48 mtype
 445 * 47:12 4k physical page base address
 446 * 11:7 fragment
 447 * 6 write
 448 * 5 read
 449 * 4 exe
 450 * 3 Z
 451 * 2 snooped
 452 * 1 system
 453 * 0 valid
 454 *
 455 * PDE format on NAVI 10:
 456 * 63:59 block fragment size
 457 * 58:55 reserved
 458 * 54 P
 459 * 53:48 reserved
 460 * 47:6 physical base address of PD or PTE
 461 * 5:3 reserved
 462 * 2 C
 463 * 1 system
 464 * 0 valid
 465 */
 466
 467static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
 468{
 469	switch (flags) {
 470	case AMDGPU_VM_MTYPE_DEFAULT:
 471		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
 472	case AMDGPU_VM_MTYPE_NC:
 473		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
 474	case AMDGPU_VM_MTYPE_WC:
 475		return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
 476	case AMDGPU_VM_MTYPE_CC:
 477		return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
 478	case AMDGPU_VM_MTYPE_UC:
 479		return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
 480	default:
 481		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
 482	}
 483}
 484
 485static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
 486				 uint64_t *addr, uint64_t *flags)
 487{
 488	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
 489		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
 490	BUG_ON(*addr & 0xFFFF00000000003FULL);
 491
 492	if (!adev->gmc.translate_further)
 493		return;
 494
 495	if (level == AMDGPU_VM_PDB1) {
 496		/* Set the block fragment size */
 497		if (!(*flags & AMDGPU_PDE_PTE))
 498			*flags |= AMDGPU_PDE_BFS(0x9);
 499
 500	} else if (level == AMDGPU_VM_PDB0) {
 501		if (*flags & AMDGPU_PDE_PTE)
 502			*flags &= ~AMDGPU_PDE_PTE;
 503		else
 504			*flags |= AMDGPU_PTE_TF;
 505	}
 506}
 507
 508static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
 509				 struct amdgpu_bo_va_mapping *mapping,
 510				 uint64_t *flags)
 511{
 512	struct amdgpu_bo *bo = mapping->bo_va->base.bo;
 513
 514	*flags &= ~AMDGPU_PTE_EXECUTABLE;
 515	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
 516
 517	*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
 518	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
 519
 520	*flags &= ~AMDGPU_PTE_NOALLOC;
 521	*flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);
 522
 523	if (mapping->flags & AMDGPU_PTE_PRT) {
 524		*flags |= AMDGPU_PTE_PRT;
 525		*flags |= AMDGPU_PTE_SNOOPED;
 526		*flags |= AMDGPU_PTE_LOG;
 527		*flags |= AMDGPU_PTE_SYSTEM;
 528		*flags &= ~AMDGPU_PTE_VALID;
 529	}
 530
 531	if (bo && bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
 532			       AMDGPU_GEM_CREATE_EXT_COHERENT |
 533			       AMDGPU_GEM_CREATE_UNCACHED))
 534		*flags = (*flags & ~AMDGPU_PTE_MTYPE_NV10_MASK) |
 535			 AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
 536}
 537
 538static unsigned int gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
 539{
 540	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
 541	unsigned int size;
 542
 543	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
 544		size = AMDGPU_VBIOS_VGA_ALLOCATION;
 545	} else {
 546		u32 viewport;
 547		u32 pitch;
 548
 549		viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
 550		pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
 551		size = (REG_GET_FIELD(viewport,
 552					HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
 553				REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
 554				4);
 555	}
 556
 557	return size;
 558}
 559
 560static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
 561	.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
 562	.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
 563	.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
 564	.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
 565	.map_mtype = gmc_v10_0_map_mtype,
 566	.get_vm_pde = gmc_v10_0_get_vm_pde,
 567	.get_vm_pte = gmc_v10_0_get_vm_pte,
 568	.get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size,
 569};
 570
 571static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
 572{
 573	if (adev->gmc.gmc_funcs == NULL)
 574		adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
 575}
 576
 577static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
 578{
 579	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
 580	case IP_VERSION(8, 7, 0):
 581		adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM;
 582		adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
 583		adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
 584		adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
 585		adev->umc.retire_unit = 1;
 586		adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
 587		adev->umc.ras = &umc_v8_7_ras;
 588		break;
 589	default:
 590		break;
 591	}
 592}
 593
 594static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
 595{
 596	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
 597	case IP_VERSION(2, 3, 0):
 598	case IP_VERSION(2, 4, 0):
 599	case IP_VERSION(2, 4, 1):
 600		adev->mmhub.funcs = &mmhub_v2_3_funcs;
 601		break;
 602	default:
 603		adev->mmhub.funcs = &mmhub_v2_0_funcs;
 604		break;
 605	}
 606}
 607
 608static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
 609{
 610	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
 611	case IP_VERSION(10, 3, 0):
 612	case IP_VERSION(10, 3, 2):
 613	case IP_VERSION(10, 3, 1):
 614	case IP_VERSION(10, 3, 4):
 615	case IP_VERSION(10, 3, 5):
 616	case IP_VERSION(10, 3, 6):
 617	case IP_VERSION(10, 3, 3):
 618	case IP_VERSION(10, 3, 7):
 619		adev->gfxhub.funcs = &gfxhub_v2_1_funcs;
 620		break;
 621	default:
 622		adev->gfxhub.funcs = &gfxhub_v2_0_funcs;
 623		break;
 624	}
 625}

static int gmc_v10_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_set_mmhub_funcs(adev);
	gmc_v10_0_set_gfxhub_funcs(adev);
	gmc_v10_0_set_gmc_funcs(adev);
	gmc_v10_0_set_irq_funcs(adev);
	gmc_v10_0_set_umc_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
	adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;

	return 0;
}
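
/*
 * Worked example of the aperture arithmetic above (the values follow
 * directly from the constants): each aperture spans 4ULL << 30 = 4 GiB, so
 *   shared:  0x2000000000000000 .. 0x20000000FFFFFFFF
 *   private: 0x1000000000000000 .. 0x10000000FFFFFFFF
 */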

static int gmc_v10_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = adev->gfxhub.funcs->get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;

	amdgpu_gmc_set_agp_default(adev, mc);
	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
	if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1))
		amdgpu_gmc_agp_location(adev, mc);

	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);

	/* add the xgmi offset of the physical node */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}
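
/*
 * Example of the XGMI adjustment above, with hypothetical numbers: on a
 * two-node hive with an 8 GiB node_segment_size, physical node 1 would
 * shift both its FB base and vram_base_offset by 1 * 8 GiB = 0x200000000.
 * On parts without XGMI, physical_node_id is 0 and both additions are
 * no-ops.
 */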

/**
 * gmc_v10_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, the vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* nbio get_memsize() reports the size in MB; convert to bytes */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
		adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	adev->gmc.visible_vram_size = adev->gmc.aper_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case IP_VERSION(10, 3, 1):   /* DCE SG support */
		case IP_VERSION(10, 3, 3):   /* DCE SG support */
		case IP_VERSION(10, 3, 6):   /* DCE SG support */
		case IP_VERSION(10, 3, 7):   /* DCE SG support */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v10_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}
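
/*
 * GART sizing sketch: with amdgpu_gart_size left at -1 (auto), the defaults
 * above work out to 512ULL << 20 = 512 MiB, or 1024ULL << 20 = 1 GiB on the
 * APUs whose display block scans out of GTT (the "DCE SG support" cases).
 * An explicit amdgpu_gart_size module parameter is given in MiB, hence the
 * (u64)amdgpu_gart_size << 20 conversion to bytes.
 */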

static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "NAVI10 PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;

	return amdgpu_gart_table_vram_alloc(adev);
}
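
/*
 * Table sizing sketch (hypothetical helper, not driver code): each GART PTE
 * is 8 bytes, hence table_size = num_gpu_pages * 8 above. For the 512 MiB
 * default GART with 4 KiB GPU pages: 512 MiB / 4 KiB = 131072 pages, and
 * 131072 * 8 = 1 MiB of page-table VRAM.
 */
static inline u64 example_gart_table_bytes(u64 gart_size_bytes)
{
	return (gart_size_bytes / AMDGPU_GPU_PAGE_SIZE) * 8;
}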

static int gmc_v10_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfxhub.funcs->init(adev);

	adev->mmhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	if ((adev->flags & AMD_IS_APU) && amdgpu_emu_mode == 1) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
		adev->gmc.vram_width = 64;
	} else if (amdgpu_emu_mode == 1) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_GDDR6;
		adev->gmc.vram_width = 1 * 128; /* numchan * chansize */
	} else {
		r = amdgpu_atomfirmware_get_vram_info(adev,
				&vram_width, &vram_type, &vram_vendor);
		adev->gmc.vram_width = vram_width;
		adev->gmc.vram_type = vram_type;
		adev->gmc.vram_vendor = vram_vendor;
	}

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(10, 3, 0):
		adev->gmc.mall_size = 128 * 1024 * 1024;
		break;
	case IP_VERSION(10, 3, 2):
		adev->gmc.mall_size = 96 * 1024 * 1024;
		break;
	case IP_VERSION(10, 3, 4):
		adev->gmc.mall_size = 32 * 1024 * 1024;
		break;
	case IP_VERSION(10, 3, 5):
		adev->gmc.mall_size = 16 * 1024 * 1024;
		break;
	default:
		adev->gmc.mall_size = 0;
		break;
	}

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 7):
		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
		/*
		 * To support 4-level page tables, use a VM size of 256 TB
		 * (48 bit), the maximum for Navi10/Navi14/Navi12, with a
		 * block size of 512 (9 bit).
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}
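
	/*
	 * Address-space arithmetic behind the call above: the VM size
	 * argument is in GiB, so 256 * 1024 GiB = 256 TiB = 2^48 bytes,
	 * matching the 48-bit limit. With 4 KiB pages (12 bits) and 9 bits
	 * translated per level, 12 + 4 * 9 = 48 bits exactly, which is the
	 * 4-level layout the comment refers to; 2^9 = 512 is the block size.
	 */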

	/* This interrupt is the VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/*
	 * Set the internal MC address mask. This is the max address of the
	 * GPU's internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
		return r;
	}

	adev->need_swiotlb = drm_need_swiotlb(44);
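
	/*
	 * Bit-width sketch: 0xffffffffffffULL == (1ULL << 48) - 1, the full
	 * 48-bit internal MC space, while DMA_BIT_MASK(44) == (1ULL << 44) - 1
	 * caps DMA addressing at 16 TiB; drm_need_swiotlb(44) records whether
	 * the platform can hand out system addresses above that 44-bit limit
	 * and so may need swiotlb bounce buffering.
	 */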

	r = gmc_v10_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v10_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;

	amdgpu_vm_manager_init(adev);

	r = amdgpu_gmc_ras_sw_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * gmc_v10_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
}

static int gmc_v10_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v10_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
}

/**
 * gmc_v10_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * Returns 0 for success.
 */
static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	if (!adev->gart.bo) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);

	if (!adev->in_s0ix) {
		r = adev->gfxhub.funcs->gart_enable(adev);
		if (r)
			return r;
	}

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	adev->hdp.funcs->init_registers(adev);

	/* Flush HDP after it is initialized */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	value = (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS);

	if (!adev->in_s0ix)
		adev->gfxhub.funcs->set_fault_enable_default(adev, value);
	adev->mmhub.funcs->set_fault_enable_default(adev, value);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);
	if (!adev->in_s0ix)
		gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned int)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	return 0;
}

static int gmc_v10_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	adev->gmc.flush_pasid_uses_kiq = !amdgpu_emu_mode;

	/* The sequence of these two function calls matters. */
	gmc_v10_0_init_golden_registers(adev);

	/*
	 * Harvestable groups in gc_utcl2 need to be programmed before any GFX
	 * block register setup within GMC; otherwise the system hangs when an
	 * SA is harvested.
	 */
	if (!adev->in_s0ix && adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest)
		adev->gfxhub.funcs->utcl2_harvest(adev);

	r = gmc_v10_0_gart_enable(adev);
	if (r)
		return r;

	if (amdgpu_emu_mode == 1) {
		r = amdgpu_gmc_vram_checking(adev);
		if (r)
			return r;
	}

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	return 0;
}

/**
 * gmc_v10_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
{
	if (!adev->in_s0ix)
		adev->gfxhub.funcs->gart_disable(adev);
	adev->mmhub.funcs->gart_disable(adev);
}

static int gmc_v10_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_gart_disable(adev);

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

	if (adev->gmc.ecc_irq.funcs &&
	    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
		amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);

	return 0;
}

static int gmc_v10_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_hw_fini(adev);

	return 0;
}

static int gmc_v10_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v10_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v10_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v10. */
	return true;
}

static int gmc_v10_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v10. */
	return 0;
}

static int gmc_v10_0_soft_reset(void *handle)
{
	return 0;
}

static int gmc_v10_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * MMHUB cannot disconnect from DF while its clock gating is disabled.
	 * The problem was first observed on DF 3.0.3; the same suspend
	 * sequence shows no issue on DF 3.0.2 series platforms.
	 */
	if (adev->in_s0ix &&
	    amdgpu_ip_version(adev, DF_HWIP, 0) > IP_VERSION(3, 0, 2)) {
		dev_dbg(adev->dev, "keep mmhub clock gating enabled for s0ix\n");
		return 0;
	}

	r = adev->mmhub.funcs->set_clockgating(adev, state);
	if (r)
		return r;

	if (amdgpu_ip_version(adev, ATHUB_HWIP, 0) >= IP_VERSION(2, 1, 0))
		return athub_v2_1_set_clockgating(adev, state);
	else
		return athub_v2_0_set_clockgating(adev, state);
}

static void gmc_v10_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 3) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 4))
		return;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	if (amdgpu_ip_version(adev, ATHUB_HWIP, 0) >= IP_VERSION(2, 1, 0))
		athub_v2_1_get_clockgating(adev, flags);
	else
		athub_v2_0_get_clockgating(adev, flags);
}

static int gmc_v10_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
	.name = "gmc_v10_0",
	.early_init = gmc_v10_0_early_init,
	.late_init = gmc_v10_0_late_init,
	.sw_init = gmc_v10_0_sw_init,
	.sw_fini = gmc_v10_0_sw_fini,
	.hw_init = gmc_v10_0_hw_init,
	.hw_fini = gmc_v10_0_hw_fini,
	.suspend = gmc_v10_0_suspend,
	.resume = gmc_v10_0_resume,
	.is_idle = gmc_v10_0_is_idle,
	.wait_for_idle = gmc_v10_0_wait_for_idle,
	.soft_reset = gmc_v10_0_soft_reset,
	.set_clockgating_state = gmc_v10_0_set_clockgating_state,
	.set_powergating_state = gmc_v10_0_set_powergating_state,
	.get_clockgating_state = gmc_v10_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v10_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v10_0_ip_funcs,
};