/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v11_0.h"
#include "umc_v8_10.h"
#include "athub/athub_3_0_0_sh_mask.h"
#include "athub/athub_3_0_0_offset.h"
#include "dcn/dcn_3_2_0_offset.h"
#include "dcn/dcn_3_2_0_sh_mask.h"
#include "oss/osssys_6_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "nbio_v4_3.h"
#include "gfxhub_v3_0.h"
#include "gfxhub_v3_0_3.h"
#include "gfxhub_v11_5_0.h"
#include "mmhub_v3_0.h"
#include "mmhub_v3_0_1.h"
#include "mmhub_v3_0_2.h"
#include "mmhub_v3_3.h"
#include "athub_v3_0.h"

static int gmc_v11_0_ecc_interrupt_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *src,
					 unsigned int type,
					 enum amdgpu_interrupt_state state)
{
	return 0;
}

static int
gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *src, unsigned int type,
				   enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false);
		/* GFX HUB */
		/* This works because this interrupt is only
		 * enabled at init/resume and disabled in
		 * fini/suspend, so the overall state doesn't
		 * change over the course of suspend/resume.
		 */
		if (!adev->in_s0ix && (adev->in_runpm || adev->in_suspend ||
				       amdgpu_in_reset(adev)))
			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true);
		/* GFX HUB */
		/* This works because this interrupt is only
		 * enabled at init/resume and disabled in
		 * fini/suspend, so the overall state doesn't
		 * change over the course of suspend/resume.
		 */
		if (!adev->in_s0ix)
			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	uint32_t vmhub_index = entry->client_id == SOC21_IH_CLIENTID_VMC ?
			       AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
	uint32_t status = 0;
	u64 addr;

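	/*
	 * Reassemble the 48-bit faulting page address from the IV entry:
	 * src_data[0] carries address bits 43:12 and the low nibble of
	 * src_data[1] carries bits 47:44.
	 */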
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB(0))
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);

		amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status,
					     entry->vmid_src ? AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0));
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
			entry->vmid_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v11_0_irq_funcs = {
	.set = gmc_v11_0_vm_fault_interrupt_state,
	.process = gmc_v11_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v11_0_ecc_funcs = {
	.set = gmc_v11_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v11_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v11_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v11_0_ecc_funcs;
	}
}

/**
 * gmc_v11_0_use_invalidate_semaphore - decide whether to use the invalidation semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 */
static bool gmc_v11_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB0(0)) &&
		(!amdgpu_sriov_vf(adev)));
}

static bool gmc_v11_0_get_vmid_pasid_mapping_info(
					struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
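	/*
	 * The IH block keeps a per-VMID LUT of PASID mappings; a zero
	 * entry means the VMID is currently unmapped.
	 */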
	*p_pasid = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid) & 0xffff;

	return !!(*p_pasid);
}

/**
 * gmc_v11_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				    uint32_t vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(adev, vmhub);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
	/* Use register 17 for GART */
	const unsigned int eng = 17;
	unsigned char hub_ip;
	u32 sem, req, ack;
	unsigned int i;
	u32 tmp;

	if ((vmhub == AMDGPU_GFXHUB(0)) && !adev->gfx.is_poweron)
		return;

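	/*
	 * Each invalidation engine has its own SEM/REQ/ACK registers,
	 * spaced eng_distance apart, so engine N's registers sit at
	 * engine 0's offset plus N * eng_distance.
	 */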
	sem = hub->vm_inv_eng0_sem + hub->eng_distance * eng;
	req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
	ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

	/* flush hdp cache */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	/* At SRIOV run time the driver shouldn't access registers through
	 * MMIO; use the KIQ to perform the VM invalidation instead.
	 */
	if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
				1 << vmid, GET_INST(GC, 0));
		return;
	}

	hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? GC_HWIP : MMHUB_HWIP;

	spin_lock(&adev->gmc.invalidate_lock);
	/*
	 * The GPUVM invalidate acknowledge state may be lost across a
	 * power-gating cycle, so acquire the semaphore before the
	 * invalidation and release it afterwards to avoid entering the
	 * power-gated state, as a workaround for the issue.
	 */

	/* TODO: Continue debugging the use of the semaphore for the GFXHUB as well. */
	if (use_semaphore) {
		for (i = 0; i < adev->usec_timeout; i++) {
			/* a read return value of 1 means the semaphore was acquired */
			tmp = RREG32_RLC_NO_KIQ(sem, hub_ip);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (i >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	WREG32_RLC_NO_KIQ(req, inv_req, hub_ip);

	/* Wait for the ACK, with a delay between polls. */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_RLC_NO_KIQ(ack, hub_ip);
		tmp &= 1 << vmid;
		if (tmp)
			break;

		udelay(1);
	}

	/* TODO: Continue debugging the use of the semaphore for the GFXHUB as well. */
	if (use_semaphore)
		WREG32_RLC_NO_KIQ(sem, 0, hub_ip);

	/* Issue an additional private VM invalidation to the MMHUB */
	if ((vmhub != AMDGPU_GFXHUB(0)) &&
	    (hub->vm_l2_bank_select_reserved_cid2) &&
	    !amdgpu_sriov_vf(adev)) {
		inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
		/* bit 25: RSERVED_CACHE_PRIVATE_INVALIDATION */
		inv_req |= (1 << 25);
		/* Issue the private invalidation */
		WREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2, inv_req);
		/* Read back to ensure the invalidation is done */
		RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
	}

	spin_unlock(&adev->gmc.invalidate_lock);

	if (i >= adev->usec_timeout)
		dev_err(adev->dev, "Timeout waiting for VM flush ACK!\n");
}

/**
 * gmc_v11_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 * @inst: which KIQ instance to use for the invalidation
 *
 * Flush the TLB for the requested pasid.
 */
static void gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					  uint16_t pasid, uint32_t flush_type,
					  bool all_hub, uint32_t inst)
{
	uint16_t queried;
	int vmid, i;

	for (vmid = 1; vmid < 16; vmid++) {
		bool valid;

		valid = gmc_v11_0_get_vmid_pasid_mapping_info(adev, vmid,
							      &queried);
		if (!valid || queried != pasid)
			continue;

		if (all_hub) {
			for_each_set_bit(i, adev->vmhubs_mask,
					 AMDGPU_MAX_VMHUBS)
				gmc_v11_0_flush_gpu_tlb(adev, vmid, i,
							flush_type);
		} else {
			gmc_v11_0_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0),
						flush_type);
		}
	}
}

static uint64_t gmc_v11_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					     unsigned int vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
	unsigned int eng = ring->vm_inv_eng;

	/*
	 * The GPUVM invalidate acknowledge state may be lost across a
	 * power-gating cycle, so acquire the semaphore before the
	 * invalidation and release it afterwards to avoid entering the
	 * power-gated state, as a workaround for the issue.
	 */

	/* TODO: Continue debugging the use of the semaphore for the GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means the semaphore was acquired */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: Continue debugging the use of the semaphore for the GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add a semaphore release after the invalidation;
		 * writing 0 releases the semaphore
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}

static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
					 unsigned int pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* The MES firmware manages the IH_VMID_x_LUT updates */
	if (ring->is_mes_queue)
		return;

	if (ring->vm_hub == AMDGPU_GFXHUB(0))
		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format:
 * 63:59 reserved
 * 58:57 reserved
 * 56 F
 * 55 L
 * 54 reserved
 * 53:52 SW
 * 51 T
 * 50:48 mtype
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */

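/*
 * Illustrative sketch only, not used by the driver: composing a minimal
 * readable/writable system PTE by hand from the layout documented above.
 * The helper name and the exact choice of fields are hypothetical.
 */
static inline u64 gmc_v11_0_example_pte(u64 page_base, u64 mtype_flags)
{
	u64 pte = page_base & 0x0000FFFFFFFFF000ULL; /* 47:12 page base */

	pte |= mtype_flags;	/* 50:48 mtype, e.g. AMDGPU_PTE_MTYPE_NV10(MTYPE_NC) */
	pte |= 1ULL << 6;	/* write */
	pte |= 1ULL << 5;	/* read */
	pte |= 1ULL << 1;	/* system */
	pte |= 1ULL << 0;	/* valid */
	return pte;
}
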
static uint64_t gmc_v11_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	}
}

static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
				 struct amdgpu_bo_va_mapping *mapping,
				 uint64_t *flags)
{
	struct amdgpu_bo *bo = mapping->bo_va->base.bo;

	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);

	*flags &= ~AMDGPU_PTE_NOALLOC;
	*flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags |= AMDGPU_PTE_SNOOPED;
		*flags |= AMDGPU_PTE_LOG;
		*flags |= AMDGPU_PTE_SYSTEM;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if (bo && bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
			       AMDGPU_GEM_CREATE_EXT_COHERENT |
			       AMDGPU_GEM_CREATE_UNCACHED))
		*flags = (*flags & ~AMDGPU_PTE_MTYPE_NV10_MASK) |
			 AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
}

static unsigned int gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, regD1VGA_CONTROL);
	unsigned int size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport;
		u32 pitch;

		viewport = RREG32_SOC15(DCE, 0, regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
		pitch = RREG32_SOC15(DCE, 0, regHUBPREQ0_DCSURF_SURFACE_PITCH);
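		/*
		 * Active framebuffer size in bytes: viewport height times
		 * surface pitch times 4 bytes per pixel (assuming a 32bpp
		 * scanout surface).
		 */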
		size = (REG_GET_FIELD(viewport,
				      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
			REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
			4);
	}

	return size;
}

static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v11_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v11_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v11_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v11_0_emit_pasid_mapping,
	.map_mtype = gmc_v11_0_map_mtype,
	.get_vm_pde = gmc_v11_0_get_vm_pde,
	.get_vm_pte = gmc_v11_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v11_0_get_vbios_fb_size,
};

static void gmc_v11_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v11_0_gmc_funcs;
}

static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
	case IP_VERSION(8, 10, 0):
		adev->umc.channel_inst_num = UMC_V8_10_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V8_10_UMC_INSTANCE_NUM;
		adev->umc.max_ras_err_cnt_per_query = UMC_V8_10_TOTAL_CHANNEL_NUM(adev);
		adev->umc.channel_offs = UMC_V8_10_PER_CHANNEL_OFFSET;
		adev->umc.retire_unit = UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM;
		if (adev->umc.node_inst_num == 4)
			adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl_ext0[0][0][0];
		else
			adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl[0][0][0];
		adev->umc.ras = &umc_v8_10_ras;
		break;
	case IP_VERSION(8, 11, 0):
		break;
	default:
		break;
	}
}

static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(3, 0, 1):
		adev->mmhub.funcs = &mmhub_v3_0_1_funcs;
		break;
	case IP_VERSION(3, 0, 2):
		adev->mmhub.funcs = &mmhub_v3_0_2_funcs;
		break;
	case IP_VERSION(3, 3, 0):
		adev->mmhub.funcs = &mmhub_v3_3_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v3_0_funcs;
		break;
	}
}

static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 3):
		adev->gfxhub.funcs = &gfxhub_v3_0_3_funcs;
		break;
	case IP_VERSION(11, 5, 0):
		adev->gfxhub.funcs = &gfxhub_v11_5_0_funcs;
		break;
	default:
		adev->gfxhub.funcs = &gfxhub_v3_0_funcs;
		break;
	}
}

static int gmc_v11_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v11_0_set_gfxhub_funcs(adev);
	gmc_v11_0_set_mmhub_funcs(adev);
	gmc_v11_0_set_gmc_funcs(adev);
	gmc_v11_0_set_irq_funcs(adev);
	gmc_v11_0_set_umc_funcs(adev);

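	/*
	 * The shared and private apertures are each 4 GiB windows
	 * ((4ULL << 30) bytes) in the GPU virtual address space.
	 */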
	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
	adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;

	return 0;
}

static int gmc_v11_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	base = adev->mmhub.funcs->get_fb_location(adev);

	amdgpu_gmc_set_agp_default(adev, mc);
	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_HIGH);
	if (!amdgpu_sriov_vf(adev) &&
	    (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(11, 5, 0)) &&
	    (amdgpu_agp == 1))
		amdgpu_gmc_agp_location(adev, mc);

	/* base offset of vram pages */
	if (amdgpu_sriov_vf(adev))
		adev->vm_manager.vram_base_offset = 0;
	else
		adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev);
}

/**
 * gmc_v11_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v11_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* The memsize register reports the VRAM size in MB */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
		adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1)
		adev->gmc.gart_size = 512ULL << 20;
	else
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;

	gmc_v11_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v11_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize the common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

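	/* One 8-byte PTE per GART page, hence num_gpu_pages * 8. */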
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;

	return amdgpu_gart_table_vram_alloc(adev);
}

static int gmc_v11_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->init(adev);

	adev->gfxhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	r = amdgpu_atomfirmware_get_vram_info(adev,
					      &vram_width, &vram_type, &vram_vendor);
	adev->gmc.vram_width = vram_width;

	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
		/*
		 * To support 4-level page tables, use the maximum VM size
		 * of 256TB (48 bits) with a block size of 512 (9 bits).
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This is the VMC page fault interrupt. */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);

	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF */
		r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/*
	 * Set the internal MC address mask. This is the maximum address of
	 * the GPU's internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
		return r;
	}

	adev->need_swiotlb = drm_need_swiotlb(44);

	r = gmc_v11_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v11_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for system use
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;

	amdgpu_vm_manager_init(adev);

	r = amdgpu_gmc_ras_sw_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * gmc_v11_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v11_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
}

static int gmc_v11_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v11_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];

		WREG32(hub->vm_contexts_disable, 0);
		return;
	}
}

/**
 * gmc_v11_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	/* Flush the HDP after it is initialized */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
		false : true;

	adev->mmhub.funcs->set_fault_enable_default(adev, value);
	gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned int)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	return 0;
}

static int gmc_v11_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

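	/* PASID TLB flushes go through the KIQ except under emulation. */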
	adev->gmc.flush_pasid_uses_kiq = !amdgpu_emu_mode;

	/* The sequence of these two function calls matters. */
	gmc_v11_0_init_golden_registers(adev);

	r = gmc_v11_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	return 0;
}

/**
 * gmc_v11_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v11_0_gart_disable(struct amdgpu_device *adev)
{
	adev->mmhub.funcs->gart_disable(adev);
}

static int gmc_v11_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

	if (adev->gmc.ecc_irq.funcs &&
	    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
		amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);

	gmc_v11_0_gart_disable(adev);

	return 0;
}

static int gmc_v11_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v11_0_hw_fini(adev);

	return 0;
}

static int gmc_v11_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v11_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v11_0_is_idle(void *handle)
{
	/* The MC is always ready in GMC v11. */
	return true;
}

static int gmc_v11_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v11. */
	return 0;
}

static int gmc_v11_0_soft_reset(void *handle)
{
	return 0;
}

static int gmc_v11_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = adev->mmhub.funcs->set_clockgating(adev, state);
	if (r)
		return r;

	return athub_v3_0_set_clockgating(adev, state);
}

static void gmc_v11_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	athub_v3_0_get_clockgating(adev, flags);
}

static int gmc_v11_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v11_0_ip_funcs = {
	.name = "gmc_v11_0",
	.early_init = gmc_v11_0_early_init,
	.sw_init = gmc_v11_0_sw_init,
	.hw_init = gmc_v11_0_hw_init,
	.late_init = gmc_v11_0_late_init,
	.sw_fini = gmc_v11_0_sw_fini,
	.hw_fini = gmc_v11_0_hw_fini,
	.suspend = gmc_v11_0_suspend,
	.resume = gmc_v11_0_resume,
	.is_idle = gmc_v11_0_is_idle,
	.wait_for_idle = gmc_v11_0_wait_for_idle,
	.soft_reset = gmc_v11_0_soft_reset,
	.set_clockgating_state = gmc_v11_0_set_clockgating_state,
	.set_powergating_state = gmc_v11_0_set_powergating_state,
	.get_clockgating_state = gmc_v11_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v11_0_ip_funcs,
};