   1/*
   2 * Copyright 2022 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23
  24#include <linux/delay.h>
  25#include <linux/firmware.h>
  26#include <linux/module.h>
  27#include <linux/pci.h>
  28
  29#include "amdgpu.h"
  30#include "amdgpu_xcp.h"
  31#include "amdgpu_ucode.h"
  32#include "amdgpu_trace.h"
  33
  34#include "sdma/sdma_4_4_2_offset.h"
  35#include "sdma/sdma_4_4_2_sh_mask.h"
  36
  37#include "soc15_common.h"
  38#include "soc15.h"
  39#include "vega10_sdma_pkt_open.h"
  40
  41#include "ivsrcid/sdma0/irqsrcs_sdma0_4_0.h"
  42#include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h"
  43
  44#include "amdgpu_ras.h"
  45
  46MODULE_FIRMWARE("amdgpu/sdma_4_4_2.bin");
  47
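/* Note: both register helpers below expect a local 'struct amdgpu_device *adev' in the calling scope. */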
  48#define WREG32_SDMA(instance, offset, value) \
  49	WREG32(sdma_v4_4_2_get_reg_offset(adev, (instance), (offset)), value)
  50#define RREG32_SDMA(instance, offset) \
  51	RREG32(sdma_v4_4_2_get_reg_offset(adev, (instance), (offset)))
  52
  53static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev);
  54static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev);
  55static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev);
  56static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev);
  57static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev);
  58
  59static u32 sdma_v4_4_2_get_reg_offset(struct amdgpu_device *adev,
  60		u32 instance, u32 offset)
  61{
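	/* GET_INST maps the logical SDMA instance index to the physical instance id. */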
  62	u32 dev_inst = GET_INST(SDMA0, instance);
  63
  64	return (adev->reg_offset[SDMA0_HWIP][dev_inst][0] + offset);
  65}
  66
  67static unsigned sdma_v4_4_2_seq_to_irq_id(int seq_num)
  68{
  69	switch (seq_num) {
  70	case 0:
  71		return SOC15_IH_CLIENTID_SDMA0;
  72	case 1:
  73		return SOC15_IH_CLIENTID_SDMA1;
  74	case 2:
  75		return SOC15_IH_CLIENTID_SDMA2;
  76	case 3:
  77		return SOC15_IH_CLIENTID_SDMA3;
  78	default:
  79		return -EINVAL;
  80	}
  81}
  82
  83static int sdma_v4_4_2_irq_id_to_seq(unsigned client_id)
  84{
  85	switch (client_id) {
  86	case SOC15_IH_CLIENTID_SDMA0:
  87		return 0;
  88	case SOC15_IH_CLIENTID_SDMA1:
  89		return 1;
  90	case SOC15_IH_CLIENTID_SDMA2:
  91		return 2;
  92	case SOC15_IH_CLIENTID_SDMA3:
  93		return 3;
  94	default:
  95		return -EINVAL;
  96	}
  97}
  98
  99static void sdma_v4_4_2_inst_init_golden_registers(struct amdgpu_device *adev,
 100						   uint32_t inst_mask)
 101{
 102	u32 val;
 103	int i;
 104
 105	for (i = 0; i < adev->sdma.num_instances; i++) {
 106		val = RREG32_SDMA(i, regSDMA_GB_ADDR_CONFIG);
 107		val = REG_SET_FIELD(val, SDMA_GB_ADDR_CONFIG, NUM_BANKS, 4);
 108		val = REG_SET_FIELD(val, SDMA_GB_ADDR_CONFIG,
 109				    PIPE_INTERLEAVE_SIZE, 0);
 110		WREG32_SDMA(i, regSDMA_GB_ADDR_CONFIG, val);
 111
 112		val = RREG32_SDMA(i, regSDMA_GB_ADDR_CONFIG_READ);
 113		val = REG_SET_FIELD(val, SDMA_GB_ADDR_CONFIG_READ, NUM_BANKS,
 114				    4);
 115		val = REG_SET_FIELD(val, SDMA_GB_ADDR_CONFIG_READ,
 116				    PIPE_INTERLEAVE_SIZE, 0);
 117		WREG32_SDMA(i, regSDMA_GB_ADDR_CONFIG_READ, val);
 118	}
 119}
 120
 121/**
 122 * sdma_v4_4_2_init_microcode - load ucode images from disk
 123 *
 124 * @adev: amdgpu_device pointer
 125 *
 126 * Use the firmware interface to load the ucode images into
 127 * the driver (not loaded into hw).
 128 * Returns 0 on success, error on failure.
 129 */
 130static int sdma_v4_4_2_init_microcode(struct amdgpu_device *adev)
 131{
 132	int ret, i;
 133
 134	for (i = 0; i < adev->sdma.num_instances; i++) {
 135		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
 136		    IP_VERSION(4, 4, 2)) {
 137			ret = amdgpu_sdma_init_microcode(adev, 0, true);
 138			break;
 139		} else {
 140			ret = amdgpu_sdma_init_microcode(adev, i, false);
 141			if (ret)
 142				return ret;
 143		}
 144	}
 145
 146	return ret;
 147}
 148
 149/**
 150 * sdma_v4_4_2_ring_get_rptr - get the current read pointer
 151 *
 152 * @ring: amdgpu ring pointer
 153 *
 154 * Get the current rptr from the hardware.
 155 */
 156static uint64_t sdma_v4_4_2_ring_get_rptr(struct amdgpu_ring *ring)
 157{
 158	u64 rptr;
 159
 160	/* XXX check if swapping is necessary on BE */
 161	rptr = READ_ONCE(*((u64 *)&ring->adev->wb.wb[ring->rptr_offs]));
 162
 163	DRM_DEBUG("rptr before shift == 0x%016llx\n", rptr);
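	/* The write-back value is a byte offset; the ring code tracks dwords, hence the shift. */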
 164	return rptr >> 2;
 165}
 166
 167/**
 168 * sdma_v4_4_2_ring_get_wptr - get the current write pointer
 169 *
 170 * @ring: amdgpu ring pointer
 171 *
 172 * Get the current wptr from the hardware.
 173 */
 174static uint64_t sdma_v4_4_2_ring_get_wptr(struct amdgpu_ring *ring)
 175{
 176	struct amdgpu_device *adev = ring->adev;
 177	u64 wptr;
 178
 179	if (ring->use_doorbell) {
 180		/* XXX check if swapping is necessary on BE */
 181		wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
 182		DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
 183	} else {
 184		wptr = RREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR_HI);
 185		wptr = wptr << 32;
 186		wptr |= RREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR);
 187		DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n",
 188				ring->me, wptr);
 189	}
 190
 191	return wptr >> 2;
 192}
 193
 194/**
 195 * sdma_v4_4_2_ring_set_wptr - commit the write pointer
 196 *
 197 * @ring: amdgpu ring pointer
 198 *
 199 * Write the wptr back to the hardware.
 200 */
 201static void sdma_v4_4_2_ring_set_wptr(struct amdgpu_ring *ring)
 202{
 203	struct amdgpu_device *adev = ring->adev;
 204
 205	DRM_DEBUG("Setting write pointer\n");
 206	if (ring->use_doorbell) {
 207		u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];
 208
 209		DRM_DEBUG("Using doorbell -- "
 210				"wptr_offs == 0x%08x "
 211				"lower_32_bits(ring->wptr) << 2 == 0x%08x "
 212				"upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
 213				ring->wptr_offs,
 214				lower_32_bits(ring->wptr << 2),
 215				upper_32_bits(ring->wptr << 2));
 216		/* XXX check if swapping is necessary on BE */
 217		WRITE_ONCE(*wb, (ring->wptr << 2));
 218		DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
 219				ring->doorbell_index, ring->wptr << 2);
 220		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
 221	} else {
 222		DRM_DEBUG("Not using doorbell -- "
 223				"regSDMA%i_GFX_RB_WPTR == 0x%08x "
 224				"regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
 225				ring->me,
 226				lower_32_bits(ring->wptr << 2),
 227				ring->me,
 228				upper_32_bits(ring->wptr << 2));
 229		WREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR,
 230			    lower_32_bits(ring->wptr << 2));
 231		WREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR_HI,
 232			    upper_32_bits(ring->wptr << 2));
 233	}
 234}
 235
 236/**
 237 * sdma_v4_4_2_page_ring_get_wptr - get the current write pointer
 238 *
 239 * @ring: amdgpu ring pointer
 240 *
 241 * Get the current wptr from the hardware.
 242 */
 243static uint64_t sdma_v4_4_2_page_ring_get_wptr(struct amdgpu_ring *ring)
 244{
 245	struct amdgpu_device *adev = ring->adev;
 246	u64 wptr;
 247
 248	if (ring->use_doorbell) {
 249		/* XXX check if swapping is necessary on BE */
 250		wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
 251	} else {
 252		wptr = RREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR_HI);
 253		wptr = wptr << 32;
 254		wptr |= RREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR);
 255	}
 256
 257	return wptr >> 2;
 258}
 259
 260/**
 261 * sdma_v4_4_2_page_ring_set_wptr - commit the write pointer
 262 *
 263 * @ring: amdgpu ring pointer
 264 *
 265 * Write the wptr back to the hardware.
 266 */
 267static void sdma_v4_4_2_page_ring_set_wptr(struct amdgpu_ring *ring)
 268{
 269	struct amdgpu_device *adev = ring->adev;
 270
 271	if (ring->use_doorbell) {
 272		u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];
 273
 274		/* XXX check if swapping is necessary on BE */
 275		WRITE_ONCE(*wb, (ring->wptr << 2));
 276		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
 277	} else {
 278		uint64_t wptr = ring->wptr << 2;
 279
 280		WREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR,
 281			    lower_32_bits(wptr));
 282		WREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR_HI,
 283			    upper_32_bits(wptr));
 284	}
 285}
 286
 287static void sdma_v4_4_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 288{
 289	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
 290	int i;
 291
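	/* With burst NOP support, the first NOP carries a count so the engine consumes the whole pad in one packet. */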
 292	for (i = 0; i < count; i++)
 293		if (sdma && sdma->burst_nop && (i == 0))
 294			amdgpu_ring_write(ring, ring->funcs->nop |
 295				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
 296		else
 297			amdgpu_ring_write(ring, ring->funcs->nop);
 298}
 299
 300/**
 301 * sdma_v4_4_2_ring_emit_ib - Schedule an IB on the DMA engine
 302 *
 303 * @ring: amdgpu ring pointer
 304 * @job: job to retrieve vmid from
 305 * @ib: IB object to schedule
 306 * @flags: unused
 307 *
 308 * Schedule an IB in the DMA ring.
 309 */
 310static void sdma_v4_4_2_ring_emit_ib(struct amdgpu_ring *ring,
 311				   struct amdgpu_job *job,
 312				   struct amdgpu_ib *ib,
 313				   uint32_t flags)
 314{
 315	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 316
  317	/* IB packet must end on an 8 DW boundary */
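	/* The INDIRECT packet emitted below is 6 dwords, so aligning wptr to (8n + 2) makes it end on that boundary. */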
 318	sdma_v4_4_2_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
 319
 320	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
 321			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
 322	/* base must be 32 byte aligned */
 323	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
 324	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
 325	amdgpu_ring_write(ring, ib->length_dw);
 326	amdgpu_ring_write(ring, 0);
 327	amdgpu_ring_write(ring, 0);
 328
 329}
 330
 331static void sdma_v4_4_2_wait_reg_mem(struct amdgpu_ring *ring,
 332				   int mem_space, int hdp,
 333				   uint32_t addr0, uint32_t addr1,
 334				   uint32_t ref, uint32_t mask,
 335				   uint32_t inv)
 336{
 337	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
 338			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(hdp) |
 339			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(mem_space) |
 340			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
 341	if (mem_space) {
 342		/* memory */
 343		amdgpu_ring_write(ring, addr0);
 344		amdgpu_ring_write(ring, addr1);
 345	} else {
 346		/* registers */
 347		amdgpu_ring_write(ring, addr0 << 2);
 348		amdgpu_ring_write(ring, addr1 << 2);
 349	}
 350	amdgpu_ring_write(ring, ref); /* reference */
 351	amdgpu_ring_write(ring, mask); /* mask */
 352	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
 353			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(inv)); /* retry count, poll interval */
 354}
 355
 356/**
 357 * sdma_v4_4_2_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 358 *
 359 * @ring: amdgpu ring pointer
 360 *
 361 * Emit an hdp flush packet on the requested DMA ring.
 362 */
 363static void sdma_v4_4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 364{
 365	struct amdgpu_device *adev = ring->adev;
 366	u32 ref_and_mask = 0;
 367	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 368
 369	ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
 370
 371	sdma_v4_4_2_wait_reg_mem(ring, 0, 1,
 372			       adev->nbio.funcs->get_hdp_flush_done_offset(adev),
 373			       adev->nbio.funcs->get_hdp_flush_req_offset(adev),
 374			       ref_and_mask, ref_and_mask, 10);
 375}
 376
 377/**
 378 * sdma_v4_4_2_ring_emit_fence - emit a fence on the DMA ring
 379 *
 380 * @ring: amdgpu ring pointer
 381 * @addr: address
 382 * @seq: sequence number
 383 * @flags: fence related flags
 384 *
 385 * Add a DMA fence packet to the ring to write
 386 * the fence seq number and DMA trap packet to generate
 387 * an interrupt if needed.
 388 */
 389static void sdma_v4_4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
 390				      unsigned flags)
 391{
 392	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
 393	/* write the fence */
 394	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
 395	/* zero in first two bits */
 396	BUG_ON(addr & 0x3);
 397	amdgpu_ring_write(ring, lower_32_bits(addr));
 398	amdgpu_ring_write(ring, upper_32_bits(addr));
 399	amdgpu_ring_write(ring, lower_32_bits(seq));
 400
 401	/* optionally write high bits as well */
 402	if (write64bit) {
 403		addr += 4;
 404		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
 405		/* zero in first two bits */
 406		BUG_ON(addr & 0x3);
 407		amdgpu_ring_write(ring, lower_32_bits(addr));
 408		amdgpu_ring_write(ring, upper_32_bits(addr));
 409		amdgpu_ring_write(ring, upper_32_bits(seq));
 410	}
 411
 412	/* generate an interrupt */
 413	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
 414	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
 415}
 416
 417
 418/**
 419 * sdma_v4_4_2_inst_gfx_stop - stop the gfx async dma engines
 420 *
 421 * @adev: amdgpu_device pointer
 422 * @inst_mask: mask of dma engine instances to be disabled
 423 *
 424 * Stop the gfx async dma ring buffers.
 425 */
 426static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
 427				      uint32_t inst_mask)
 428{
 429	struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
 430	u32 doorbell_offset, doorbell;
 431	u32 rb_cntl, ib_cntl;
 432	int i, unset = 0;
 433
 434	for_each_inst(i, inst_mask) {
 435		sdma[i] = &adev->sdma.instance[i].ring;
 436
 437		if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) {
 438			amdgpu_ttm_set_buffer_funcs_status(adev, false);
 439			unset = 1;
 440		}
 441
 442		rb_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_CNTL);
 443		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_ENABLE, 0);
 444		WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);
 445		ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL);
 446		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 0);
 447		WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl);
 448
 449		if (sdma[i]->use_doorbell) {
 450			doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL);
 451			doorbell_offset = RREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET);
 452
 453			doorbell = REG_SET_FIELD(doorbell, SDMA_GFX_DOORBELL, ENABLE, 0);
 454			doorbell_offset = REG_SET_FIELD(doorbell_offset,
 455					SDMA_GFX_DOORBELL_OFFSET,
 456					OFFSET, 0);
 457			WREG32_SDMA(i, regSDMA_GFX_DOORBELL, doorbell);
 458			WREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET, doorbell_offset);
 459		}
 460	}
 461}
 462
 463/**
 464 * sdma_v4_4_2_inst_rlc_stop - stop the compute async dma engines
 465 *
 466 * @adev: amdgpu_device pointer
 467 * @inst_mask: mask of dma engine instances to be disabled
 468 *
 469 * Stop the compute async dma queues.
 470 */
 471static void sdma_v4_4_2_inst_rlc_stop(struct amdgpu_device *adev,
 472				      uint32_t inst_mask)
 473{
 474	/* XXX todo */
 475}
 476
 477/**
 478 * sdma_v4_4_2_inst_page_stop - stop the page async dma engines
 479 *
 480 * @adev: amdgpu_device pointer
 481 * @inst_mask: mask of dma engine instances to be disabled
 482 *
 483 * Stop the page async dma ring buffers.
 484 */
 485static void sdma_v4_4_2_inst_page_stop(struct amdgpu_device *adev,
 486				       uint32_t inst_mask)
 487{
 488	struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
 489	u32 rb_cntl, ib_cntl;
 490	int i;
 491	bool unset = false;
 492
 493	for_each_inst(i, inst_mask) {
 494		sdma[i] = &adev->sdma.instance[i].page;
 495
 496		if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
 497			(!unset)) {
 498			amdgpu_ttm_set_buffer_funcs_status(adev, false);
 499			unset = true;
 500		}
 501
 502		rb_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_CNTL);
 503		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL,
 504					RB_ENABLE, 0);
 505		WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl);
 506		ib_cntl = RREG32_SDMA(i, regSDMA_PAGE_IB_CNTL);
 507		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_PAGE_IB_CNTL,
 508					IB_ENABLE, 0);
 509		WREG32_SDMA(i, regSDMA_PAGE_IB_CNTL, ib_cntl);
 510	}
 511}
 512
 513/**
 514 * sdma_v4_4_2_inst_ctx_switch_enable - stop the async dma engines context switch
 515 *
 516 * @adev: amdgpu_device pointer
 517 * @enable: enable/disable the DMA MEs context switch.
 518 * @inst_mask: mask of dma engine instances to be enabled
 519 *
 520 * Halt or unhalt the async dma engines context switch.
 521 */
 522static void sdma_v4_4_2_inst_ctx_switch_enable(struct amdgpu_device *adev,
 523					       bool enable, uint32_t inst_mask)
 524{
 525	u32 f32_cntl, phase_quantum = 0;
 526	int i;
 527
 528	if (amdgpu_sdma_phase_quantum) {
 529		unsigned value = amdgpu_sdma_phase_quantum;
 530		unsigned unit = 0;
 531
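		/* Encode the quantum as (value << unit): halve value (rounding up) and bump unit until it fits the VALUE field. */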
 532		while (value > (SDMA_PHASE0_QUANTUM__VALUE_MASK >>
 533				SDMA_PHASE0_QUANTUM__VALUE__SHIFT)) {
 534			value = (value + 1) >> 1;
 535			unit++;
 536		}
 537		if (unit > (SDMA_PHASE0_QUANTUM__UNIT_MASK >>
 538			    SDMA_PHASE0_QUANTUM__UNIT__SHIFT)) {
 539			value = (SDMA_PHASE0_QUANTUM__VALUE_MASK >>
 540				 SDMA_PHASE0_QUANTUM__VALUE__SHIFT);
 541			unit = (SDMA_PHASE0_QUANTUM__UNIT_MASK >>
 542				SDMA_PHASE0_QUANTUM__UNIT__SHIFT);
 543			WARN_ONCE(1,
 544			"clamping sdma_phase_quantum to %uK clock cycles\n",
 545				  value << unit);
 546		}
 547		phase_quantum =
 548			value << SDMA_PHASE0_QUANTUM__VALUE__SHIFT |
 549			unit  << SDMA_PHASE0_QUANTUM__UNIT__SHIFT;
 550	}
 551
 552	for_each_inst(i, inst_mask) {
 553		f32_cntl = RREG32_SDMA(i, regSDMA_CNTL);
 554		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA_CNTL,
 555				AUTO_CTXSW_ENABLE, enable ? 1 : 0);
 556		if (enable && amdgpu_sdma_phase_quantum) {
 557			WREG32_SDMA(i, regSDMA_PHASE0_QUANTUM, phase_quantum);
 558			WREG32_SDMA(i, regSDMA_PHASE1_QUANTUM, phase_quantum);
 559			WREG32_SDMA(i, regSDMA_PHASE2_QUANTUM, phase_quantum);
 560		}
 561		WREG32_SDMA(i, regSDMA_CNTL, f32_cntl);
 562
 563		/* Extend page fault timeout to avoid interrupt storm */
 564		WREG32_SDMA(i, regSDMA_UTCL1_TIMEOUT, 0x00800080);
 565	}
 566}
 567
 568/**
 569 * sdma_v4_4_2_inst_enable - stop the async dma engines
 570 *
 571 * @adev: amdgpu_device pointer
 572 * @enable: enable/disable the DMA MEs.
 573 * @inst_mask: mask of dma engine instances to be enabled
 574 *
 575 * Halt or unhalt the async dma engines.
 576 */
 577static void sdma_v4_4_2_inst_enable(struct amdgpu_device *adev, bool enable,
 578				    uint32_t inst_mask)
 579{
 580	u32 f32_cntl;
 581	int i;
 582
 583	if (!enable) {
 584		sdma_v4_4_2_inst_gfx_stop(adev, inst_mask);
 585		sdma_v4_4_2_inst_rlc_stop(adev, inst_mask);
 586		if (adev->sdma.has_page_queue)
 587			sdma_v4_4_2_inst_page_stop(adev, inst_mask);
 588
 589		/* SDMA FW needs to respond to FREEZE requests during reset.
 590		 * Keep it running during reset */
 591		if (!amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
 592			return;
 593	}
 594
 595	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
 596		return;
 597
 598	for_each_inst(i, inst_mask) {
 599		f32_cntl = RREG32_SDMA(i, regSDMA_F32_CNTL);
 600		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA_F32_CNTL, HALT, enable ? 0 : 1);
 601		WREG32_SDMA(i, regSDMA_F32_CNTL, f32_cntl);
 602	}
 603}
 604
 605/*
 606 * sdma_v4_4_2_rb_cntl - get parameters for rb_cntl
 607 */
 608static uint32_t sdma_v4_4_2_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
 609{
 610	/* Set ring buffer size in dwords */
 611	uint32_t rb_bufsz = order_base_2(ring->ring_size / 4);
 612
 613	barrier(); /* work around https://bugs.llvm.org/show_bug.cgi?id=42576 */
 614	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
 615#ifdef __BIG_ENDIAN
 616	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
 617	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL,
 618				RPTR_WRITEBACK_SWAP_ENABLE, 1);
 619#endif
 620	return rb_cntl;
 621}
 622
 623/**
 624 * sdma_v4_4_2_gfx_resume - setup and start the async dma engines
 625 *
 626 * @adev: amdgpu_device pointer
 627 * @i: instance to resume
 628 *
  629 * Set up the gfx DMA ring buffers for the given instance
  630 * and enable them.
 631 */
 632static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i)
 633{
 634	struct amdgpu_ring *ring = &adev->sdma.instance[i].ring;
 635	u32 rb_cntl, ib_cntl, wptr_poll_cntl;
 636	u32 wb_offset;
 637	u32 doorbell;
 638	u32 doorbell_offset;
 639	u64 wptr_gpu_addr;
 640
 641	wb_offset = (ring->rptr_offs * 4);
 642
 643	rb_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_CNTL);
 644	rb_cntl = sdma_v4_4_2_rb_cntl(ring, rb_cntl);
 645	WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);
 646
 647	/* set the wb address whether it's enabled or not */
 648	WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_ADDR_HI,
 649	       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
 650	WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_ADDR_LO,
 651	       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
 652
 653	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL,
 654				RPTR_WRITEBACK_ENABLE, 1);
 655
 656	WREG32_SDMA(i, regSDMA_GFX_RB_BASE, ring->gpu_addr >> 8);
 657	WREG32_SDMA(i, regSDMA_GFX_RB_BASE_HI, ring->gpu_addr >> 40);
 658
 659	ring->wptr = 0;
 660
  661	/* before programming wptr to a smaller value, minor_ptr_update needs to be set first */
 662	WREG32_SDMA(i, regSDMA_GFX_MINOR_PTR_UPDATE, 1);
 663
 664	/* Initialize the ring buffer's read and write pointers */
 665	WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, 0);
 666	WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, 0);
 667	WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, 0);
 668	WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, 0);
 669
 670	doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL);
 671	doorbell_offset = RREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET);
 672
 673	doorbell = REG_SET_FIELD(doorbell, SDMA_GFX_DOORBELL, ENABLE,
 674				 ring->use_doorbell);
 675	doorbell_offset = REG_SET_FIELD(doorbell_offset,
 676					SDMA_GFX_DOORBELL_OFFSET,
 677					OFFSET, ring->doorbell_index);
 678	WREG32_SDMA(i, regSDMA_GFX_DOORBELL, doorbell);
 679	WREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET, doorbell_offset);
 680
 681	sdma_v4_4_2_ring_set_wptr(ring);
 682
  683	/* set minor_ptr_update to 0 after wptr is programmed */
 684	WREG32_SDMA(i, regSDMA_GFX_MINOR_PTR_UPDATE, 0);
 685
 686	/* setup the wptr shadow polling */
 687	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
 688	WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_ADDR_LO,
 689		    lower_32_bits(wptr_gpu_addr));
 690	WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_ADDR_HI,
 691		    upper_32_bits(wptr_gpu_addr));
 692	wptr_poll_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_CNTL);
 693	wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
 694				       SDMA_GFX_RB_WPTR_POLL_CNTL,
 695				       F32_POLL_ENABLE, amdgpu_sriov_vf(adev)? 1 : 0);
 696	WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
 697
 698	/* enable DMA RB */
 699	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_ENABLE, 1);
 700	WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);
 701
 702	ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL);
 703	ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 1);
 704#ifdef __BIG_ENDIAN
 705	ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
 706#endif
 707	/* enable DMA IBs */
 708	WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl);
 709}
 710
 711/**
 712 * sdma_v4_4_2_page_resume - setup and start the async dma engines
 713 *
 714 * @adev: amdgpu_device pointer
 715 * @i: instance to resume
 716 *
  717 * Set up the page DMA ring buffers for the given instance
  718 * and enable them.
 719 */
 720static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i)
 721{
 722	struct amdgpu_ring *ring = &adev->sdma.instance[i].page;
 723	u32 rb_cntl, ib_cntl, wptr_poll_cntl;
 724	u32 wb_offset;
 725	u32 doorbell;
 726	u32 doorbell_offset;
 727	u64 wptr_gpu_addr;
 728
 729	wb_offset = (ring->rptr_offs * 4);
 730
 731	rb_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_CNTL);
 732	rb_cntl = sdma_v4_4_2_rb_cntl(ring, rb_cntl);
 733	WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl);
 734
 735	/* Initialize the ring buffer's read and write pointers */
 736	WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR, 0);
 737	WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_HI, 0);
 738	WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR, 0);
 739	WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_HI, 0);
 740
 741	/* set the wb address whether it's enabled or not */
 742	WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_ADDR_HI,
 743	       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
 744	WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_ADDR_LO,
 745	       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
 746
 747	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL,
 748				RPTR_WRITEBACK_ENABLE, 1);
 749
 750	WREG32_SDMA(i, regSDMA_PAGE_RB_BASE, ring->gpu_addr >> 8);
 751	WREG32_SDMA(i, regSDMA_PAGE_RB_BASE_HI, ring->gpu_addr >> 40);
 752
 753	ring->wptr = 0;
 754
  755	/* before programming wptr to a smaller value, minor_ptr_update needs to be set first */
 756	WREG32_SDMA(i, regSDMA_PAGE_MINOR_PTR_UPDATE, 1);
 757
 758	doorbell = RREG32_SDMA(i, regSDMA_PAGE_DOORBELL);
 759	doorbell_offset = RREG32_SDMA(i, regSDMA_PAGE_DOORBELL_OFFSET);
 760
 761	doorbell = REG_SET_FIELD(doorbell, SDMA_PAGE_DOORBELL, ENABLE,
 762				 ring->use_doorbell);
 763	doorbell_offset = REG_SET_FIELD(doorbell_offset,
 764					SDMA_PAGE_DOORBELL_OFFSET,
 765					OFFSET, ring->doorbell_index);
 766	WREG32_SDMA(i, regSDMA_PAGE_DOORBELL, doorbell);
 767	WREG32_SDMA(i, regSDMA_PAGE_DOORBELL_OFFSET, doorbell_offset);
 768
 769	/* paging queue doorbell range is setup at sdma_v4_4_2_gfx_resume */
 770	sdma_v4_4_2_page_ring_set_wptr(ring);
 771
  772	/* set minor_ptr_update to 0 after wptr is programmed */
 773	WREG32_SDMA(i, regSDMA_PAGE_MINOR_PTR_UPDATE, 0);
 774
 775	/* setup the wptr shadow polling */
 776	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
 777	WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_ADDR_LO,
 778		    lower_32_bits(wptr_gpu_addr));
 779	WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_ADDR_HI,
 780		    upper_32_bits(wptr_gpu_addr));
 781	wptr_poll_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_CNTL);
 782	wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
 783				       SDMA_PAGE_RB_WPTR_POLL_CNTL,
 784				       F32_POLL_ENABLE, amdgpu_sriov_vf(adev)? 1 : 0);
 785	WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
 786
 787	/* enable DMA RB */
 788	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL, RB_ENABLE, 1);
 789	WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl);
 790
 791	ib_cntl = RREG32_SDMA(i, regSDMA_PAGE_IB_CNTL);
 792	ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_PAGE_IB_CNTL, IB_ENABLE, 1);
 793#ifdef __BIG_ENDIAN
 794	ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_PAGE_IB_CNTL, IB_SWAP_ENABLE, 1);
 795#endif
 796	/* enable DMA IBs */
 797	WREG32_SDMA(i, regSDMA_PAGE_IB_CNTL, ib_cntl);
 798}
 799
 800static void sdma_v4_4_2_init_pg(struct amdgpu_device *adev)
 801{
 802
 803}
 804
 805/**
 806 * sdma_v4_4_2_inst_rlc_resume - setup and start the async dma engines
 807 *
 808 * @adev: amdgpu_device pointer
 809 * @inst_mask: mask of dma engine instances to be enabled
 810 *
 811 * Set up the compute DMA queues and enable them.
 812 * Returns 0 for success, error for failure.
 813 */
 814static int sdma_v4_4_2_inst_rlc_resume(struct amdgpu_device *adev,
 815				       uint32_t inst_mask)
 816{
 817	sdma_v4_4_2_init_pg(adev);
 818
 819	return 0;
 820}
 821
 822/**
 823 * sdma_v4_4_2_inst_load_microcode - load the sDMA ME ucode
 824 *
 825 * @adev: amdgpu_device pointer
 826 * @inst_mask: mask of dma engine instances to be enabled
 827 *
 828 * Loads the sDMA0/1 ucode.
 829 * Returns 0 for success, -EINVAL if the ucode is not available.
 830 */
 831static int sdma_v4_4_2_inst_load_microcode(struct amdgpu_device *adev,
 832					   uint32_t inst_mask)
 833{
 834	const struct sdma_firmware_header_v1_0 *hdr;
 835	const __le32 *fw_data;
 836	u32 fw_size;
 837	int i, j;
 838
 839	/* halt the MEs */
 840	sdma_v4_4_2_inst_enable(adev, false, inst_mask);
 841
 842	for_each_inst(i, inst_mask) {
 843		if (!adev->sdma.instance[i].fw)
 844			return -EINVAL;
 845
 846		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
 847		amdgpu_ucode_print_sdma_hdr(&hdr->header);
 848		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 849
 850		fw_data = (const __le32 *)
 851			(adev->sdma.instance[i].fw->data +
 852				le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 853
 854		WREG32_SDMA(i, regSDMA_UCODE_ADDR, 0);
 855
 856		for (j = 0; j < fw_size; j++)
 857			WREG32_SDMA(i, regSDMA_UCODE_DATA,
 858				    le32_to_cpup(fw_data++));
 859
 860		WREG32_SDMA(i, regSDMA_UCODE_ADDR,
 861			    adev->sdma.instance[i].fw_version);
 862	}
 863
 864	return 0;
 865}
 866
 867/**
 868 * sdma_v4_4_2_inst_start - setup and start the async dma engines
 869 *
 870 * @adev: amdgpu_device pointer
 871 * @inst_mask: mask of dma engine instances to be enabled
 872 *
 873 * Set up the DMA engines and enable them.
 874 * Returns 0 for success, error for failure.
 875 */
 876static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
 877				  uint32_t inst_mask)
 878{
 879	struct amdgpu_ring *ring;
 880	uint32_t tmp_mask;
 881	int i, r = 0;
 882
 883	if (amdgpu_sriov_vf(adev)) {
 884		sdma_v4_4_2_inst_ctx_switch_enable(adev, false, inst_mask);
 885		sdma_v4_4_2_inst_enable(adev, false, inst_mask);
 886	} else {
 887		/* bypass sdma microcode loading on Gopher */
 888		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP &&
 889		    adev->sdma.instance[0].fw) {
 890			r = sdma_v4_4_2_inst_load_microcode(adev, inst_mask);
 891			if (r)
 892				return r;
 893		}
 894
 895		/* unhalt the MEs */
 896		sdma_v4_4_2_inst_enable(adev, true, inst_mask);
 897		/* enable sdma ring preemption */
 898		sdma_v4_4_2_inst_ctx_switch_enable(adev, true, inst_mask);
 899	}
 900
 901	/* start the gfx rings and rlc compute queues */
 902	tmp_mask = inst_mask;
 903	for_each_inst(i, tmp_mask) {
 904		uint32_t temp;
 905
 906		WREG32_SDMA(i, regSDMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
 907		sdma_v4_4_2_gfx_resume(adev, i);
 908		if (adev->sdma.has_page_queue)
 909			sdma_v4_4_2_page_resume(adev, i);
 910
  911		/* always set the UTC L1 enable flag to 1 */
 912		temp = RREG32_SDMA(i, regSDMA_CNTL);
 913		temp = REG_SET_FIELD(temp, SDMA_CNTL, UTC_L1_ENABLE, 1);
 914		/* enable context empty interrupt during initialization */
 915		temp = REG_SET_FIELD(temp, SDMA_CNTL, CTXEMPTY_INT_ENABLE, 1);
 916		WREG32_SDMA(i, regSDMA_CNTL, temp);
 917
 918		if (!amdgpu_sriov_vf(adev)) {
 919			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
 920				/* unhalt engine */
 921				temp = RREG32_SDMA(i, regSDMA_F32_CNTL);
 922				temp = REG_SET_FIELD(temp, SDMA_F32_CNTL, HALT, 0);
 923				WREG32_SDMA(i, regSDMA_F32_CNTL, temp);
 924			}
 925		}
 926	}
 927
 928	if (amdgpu_sriov_vf(adev)) {
 929		sdma_v4_4_2_inst_ctx_switch_enable(adev, true, inst_mask);
 930		sdma_v4_4_2_inst_enable(adev, true, inst_mask);
 931	} else {
 932		r = sdma_v4_4_2_inst_rlc_resume(adev, inst_mask);
 933		if (r)
 934			return r;
 935	}
 936
 937	tmp_mask = inst_mask;
 938	for_each_inst(i, tmp_mask) {
 939		ring = &adev->sdma.instance[i].ring;
 940
 941		r = amdgpu_ring_test_helper(ring);
 942		if (r)
 943			return r;
 944
 945		if (adev->sdma.has_page_queue) {
 946			struct amdgpu_ring *page = &adev->sdma.instance[i].page;
 947
 948			r = amdgpu_ring_test_helper(page);
 949			if (r)
 950				return r;
 951
 952			if (adev->mman.buffer_funcs_ring == page)
 953				amdgpu_ttm_set_buffer_funcs_status(adev, true);
 954		}
 955
 956		if (adev->mman.buffer_funcs_ring == ring)
 957			amdgpu_ttm_set_buffer_funcs_status(adev, true);
 958	}
 959
 960	return r;
 961}
 962
 963/**
 964 * sdma_v4_4_2_ring_test_ring - simple async dma engine test
 965 *
 966 * @ring: amdgpu_ring structure holding ring information
 967 *
  968 * Test the DMA engine by using it to write a
  969 * value to memory.
 970 * Returns 0 for success, error for failure.
 971 */
 972static int sdma_v4_4_2_ring_test_ring(struct amdgpu_ring *ring)
 973{
 974	struct amdgpu_device *adev = ring->adev;
 975	unsigned i;
 976	unsigned index;
 977	int r;
 978	u32 tmp;
 979	u64 gpu_addr;
 980
 981	r = amdgpu_device_wb_get(adev, &index);
 982	if (r)
 983		return r;
 984
 985	gpu_addr = adev->wb.gpu_addr + (index * 4);
 986	tmp = 0xCAFEDEAD;
 987	adev->wb.wb[index] = cpu_to_le32(tmp);
 988
 989	r = amdgpu_ring_alloc(ring, 5);
 990	if (r)
 991		goto error_free_wb;
 992
 993	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
 994			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
 995	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
 996	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
 997	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
 998	amdgpu_ring_write(ring, 0xDEADBEEF);
 999	amdgpu_ring_commit(ring);
1000
1001	for (i = 0; i < adev->usec_timeout; i++) {
1002		tmp = le32_to_cpu(adev->wb.wb[index]);
1003		if (tmp == 0xDEADBEEF)
1004			break;
1005		udelay(1);
1006	}
1007
1008	if (i >= adev->usec_timeout)
1009		r = -ETIMEDOUT;
1010
1011error_free_wb:
1012	amdgpu_device_wb_free(adev, index);
1013	return r;
1014}
1015
1016/**
1017 * sdma_v4_4_2_ring_test_ib - test an IB on the DMA engine
1018 *
1019 * @ring: amdgpu_ring structure holding ring information
1020 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
1021 *
1022 * Test a simple IB in the DMA ring.
1023 * Returns 0 on success, error on failure.
1024 */
1025static int sdma_v4_4_2_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1026{
1027	struct amdgpu_device *adev = ring->adev;
1028	struct amdgpu_ib ib;
1029	struct dma_fence *f = NULL;
1030	unsigned index;
1031	long r;
1032	u32 tmp = 0;
1033	u64 gpu_addr;
1034
1035	r = amdgpu_device_wb_get(adev, &index);
1036	if (r)
1037		return r;
1038
1039	gpu_addr = adev->wb.gpu_addr + (index * 4);
1040	tmp = 0xCAFEDEAD;
1041	adev->wb.wb[index] = cpu_to_le32(tmp);
1042	memset(&ib, 0, sizeof(ib));
1043	r = amdgpu_ib_get(adev, NULL, 256,
1044					AMDGPU_IB_POOL_DIRECT, &ib);
1045	if (r)
1046		goto err0;
1047
1048	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1049		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
1050	ib.ptr[1] = lower_32_bits(gpu_addr);
1051	ib.ptr[2] = upper_32_bits(gpu_addr);
1052	ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
1053	ib.ptr[4] = 0xDEADBEEF;
1054	ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1055	ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1056	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1057	ib.length_dw = 8;
1058
1059	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1060	if (r)
1061		goto err1;
1062
1063	r = dma_fence_wait_timeout(f, false, timeout);
1064	if (r == 0) {
1065		r = -ETIMEDOUT;
1066		goto err1;
1067	} else if (r < 0) {
1068		goto err1;
1069	}
1070	tmp = le32_to_cpu(adev->wb.wb[index]);
1071	if (tmp == 0xDEADBEEF)
1072		r = 0;
1073	else
1074		r = -EINVAL;
1075
1076err1:
1077	amdgpu_ib_free(adev, &ib, NULL);
1078	dma_fence_put(f);
1079err0:
1080	amdgpu_device_wb_free(adev, index);
1081	return r;
1082}
1083
1084
1085/**
1086 * sdma_v4_4_2_vm_copy_pte - update PTEs by copying them from the GART
1087 *
1088 * @ib: indirect buffer to fill with commands
1089 * @pe: addr of the page entry
1090 * @src: src addr to copy from
1091 * @count: number of page entries to update
1092 *
1093 * Update PTEs by copying them from the GART using sDMA.
1094 */
1095static void sdma_v4_4_2_vm_copy_pte(struct amdgpu_ib *ib,
1096				  uint64_t pe, uint64_t src,
1097				  unsigned count)
1098{
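	/* Each PTE is 8 bytes, so the copy length is count * 8. */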
1099	unsigned bytes = count * 8;
1100
1101	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
1102		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
1103	ib->ptr[ib->length_dw++] = bytes - 1;
1104	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1105	ib->ptr[ib->length_dw++] = lower_32_bits(src);
1106	ib->ptr[ib->length_dw++] = upper_32_bits(src);
1107	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1108	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1109
1110}
1111
1112/**
1113 * sdma_v4_4_2_vm_write_pte - update PTEs by writing them manually
1114 *
1115 * @ib: indirect buffer to fill with commands
1116 * @pe: addr of the page entry
1117 * @value: dst addr to write into pe
1118 * @count: number of page entries to update
1119 * @incr: increase next addr by incr bytes
1120 *
1121 * Update PTEs by writing them manually using sDMA.
1122 */
1123static void sdma_v4_4_2_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
1124				   uint64_t value, unsigned count,
1125				   uint32_t incr)
1126{
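	/* Each 64-bit PTE value occupies two dwords in the WRITE_LINEAR payload. */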
1127	unsigned ndw = count * 2;
1128
1129	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1130		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
1131	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1132	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1133	ib->ptr[ib->length_dw++] = ndw - 1;
1134	for (; ndw > 0; ndw -= 2) {
1135		ib->ptr[ib->length_dw++] = lower_32_bits(value);
1136		ib->ptr[ib->length_dw++] = upper_32_bits(value);
1137		value += incr;
1138	}
1139}
1140
1141/**
1142 * sdma_v4_4_2_vm_set_pte_pde - update the page tables using sDMA
1143 *
1144 * @ib: indirect buffer to fill with commands
1145 * @pe: addr of the page entry
1146 * @addr: dst addr to write into pe
1147 * @count: number of page entries to update
1148 * @incr: increase next addr by incr bytes
1149 * @flags: access flags
1150 *
1151 * Update the page tables using sDMA.
1152 */
1153static void sdma_v4_4_2_vm_set_pte_pde(struct amdgpu_ib *ib,
1154				     uint64_t pe,
1155				     uint64_t addr, unsigned count,
1156				     uint32_t incr, uint64_t flags)
1157{
1158	/* for physically contiguous pages (vram) */
1159	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
1160	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
1161	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1162	ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
1163	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
1164	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
1165	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
1166	ib->ptr[ib->length_dw++] = incr; /* increment size */
1167	ib->ptr[ib->length_dw++] = 0;
1168	ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
1169}
1170
1171/**
1172 * sdma_v4_4_2_ring_pad_ib - pad the IB to the required number of dw
1173 *
1174 * @ring: amdgpu_ring structure holding ring information
1175 * @ib: indirect buffer to fill with padding
1176 */
1177static void sdma_v4_4_2_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
1178{
1179	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
1180	u32 pad_count;
1181	int i;
1182
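	/* (-length) & 7 gives the dwords needed to round the IB up to a multiple of 8. */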
1183	pad_count = (-ib->length_dw) & 7;
1184	for (i = 0; i < pad_count; i++)
1185		if (sdma && sdma->burst_nop && (i == 0))
1186			ib->ptr[ib->length_dw++] =
1187				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
1188				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
1189		else
1190			ib->ptr[ib->length_dw++] =
1191				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
1192}
1193
1194
1195/**
1196 * sdma_v4_4_2_ring_emit_pipeline_sync - sync the pipeline
1197 *
1198 * @ring: amdgpu_ring pointer
1199 *
 1200 * Make sure all previous operations are completed.
1201 */
1202static void sdma_v4_4_2_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1203{
1204	uint32_t seq = ring->fence_drv.sync_seq;
1205	uint64_t addr = ring->fence_drv.gpu_addr;
1206
1207	/* wait for idle */
1208	sdma_v4_4_2_wait_reg_mem(ring, 1, 0,
1209			       addr & 0xfffffffc,
1210			       upper_32_bits(addr) & 0xffffffff,
1211			       seq, 0xffffffff, 4);
1212}
1213
1214
1215/**
1216 * sdma_v4_4_2_ring_emit_vm_flush - vm flush using sDMA
1217 *
1218 * @ring: amdgpu_ring pointer
1219 * @vmid: vmid number to use
1220 * @pd_addr: address
1221 *
1222 * Update the page table base and flush the VM TLB
1223 * using sDMA.
1224 */
1225static void sdma_v4_4_2_ring_emit_vm_flush(struct amdgpu_ring *ring,
1226					 unsigned vmid, uint64_t pd_addr)
1227{
1228	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1229}
1230
1231static void sdma_v4_4_2_ring_emit_wreg(struct amdgpu_ring *ring,
1232				     uint32_t reg, uint32_t val)
1233{
1234	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
1235			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
1236	amdgpu_ring_write(ring, reg);
1237	amdgpu_ring_write(ring, val);
1238}
1239
1240static void sdma_v4_4_2_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1241					 uint32_t val, uint32_t mask)
1242{
1243	sdma_v4_4_2_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);
1244}
1245
1246static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev)
1247{
1248	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
1249	case IP_VERSION(4, 4, 2):
1250		return false;
1251	default:
1252		return false;
1253	}
1254}
1255
1256static int sdma_v4_4_2_early_init(void *handle)
1257{
1258	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1259	int r;
1260
1261	r = sdma_v4_4_2_init_microcode(adev);
1262	if (r)
1263		return r;
1264
1265	/* TODO: Page queue breaks driver reload under SRIOV */
1266	if (sdma_v4_4_2_fw_support_paging_queue(adev))
1267		adev->sdma.has_page_queue = true;
1268
1269	sdma_v4_4_2_set_ring_funcs(adev);
1270	sdma_v4_4_2_set_buffer_funcs(adev);
1271	sdma_v4_4_2_set_vm_pte_funcs(adev);
1272	sdma_v4_4_2_set_irq_funcs(adev);
1273	sdma_v4_4_2_set_ras_funcs(adev);
1274
1275	return 0;
1276}
1277
1278#if 0
1279static int sdma_v4_4_2_process_ras_data_cb(struct amdgpu_device *adev,
1280		void *err_data,
1281		struct amdgpu_iv_entry *entry);
1282#endif
1283
1284static int sdma_v4_4_2_late_init(void *handle)
1285{
1286	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1287#if 0
1288	struct ras_ih_if ih_info = {
1289		.cb = sdma_v4_4_2_process_ras_data_cb,
1290	};
1291#endif
1292	if (!amdgpu_persistent_edc_harvesting_supported(adev))
1293		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__SDMA);
1294
1295	return 0;
1296}
1297
1298static int sdma_v4_4_2_sw_init(void *handle)
1299{
1300	struct amdgpu_ring *ring;
1301	int r, i;
1302	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1303	u32 aid_id;
1304
1305	/* SDMA trap event */
1306	for (i = 0; i < adev->sdma.num_inst_per_aid; i++) {
1307		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
1308				      SDMA0_4_0__SRCID__SDMA_TRAP,
1309				      &adev->sdma.trap_irq);
1310		if (r)
1311			return r;
1312	}
1313
1314	/* SDMA SRAM ECC event */
1315	for (i = 0; i < adev->sdma.num_inst_per_aid; i++) {
1316		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
1317				      SDMA0_4_0__SRCID__SDMA_SRAM_ECC,
1318				      &adev->sdma.ecc_irq);
1319		if (r)
1320			return r;
1321	}
1322
 1323	/* SDMA VM_HOLE/DOORBELL_INV/POLL_TIMEOUT/SRBM_WRITE_PROTECTION event */
1324	for (i = 0; i < adev->sdma.num_inst_per_aid; i++) {
1325		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
1326				      SDMA0_4_0__SRCID__SDMA_VM_HOLE,
1327				      &adev->sdma.vm_hole_irq);
1328		if (r)
1329			return r;
1330
1331		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
1332				      SDMA0_4_0__SRCID__SDMA_DOORBELL_INVALID,
1333				      &adev->sdma.doorbell_invalid_irq);
1334		if (r)
1335			return r;
1336
1337		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
1338				      SDMA0_4_0__SRCID__SDMA_POLL_TIMEOUT,
1339				      &adev->sdma.pool_timeout_irq);
1340		if (r)
1341			return r;
1342
1343		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
1344				      SDMA0_4_0__SRCID__SDMA_SRBMWRITE,
1345				      &adev->sdma.srbm_write_irq);
1346		if (r)
1347			return r;
1348	}
1349
1350	for (i = 0; i < adev->sdma.num_instances; i++) {
1351		ring = &adev->sdma.instance[i].ring;
1352		ring->ring_obj = NULL;
1353		ring->use_doorbell = true;
1354		aid_id = adev->sdma.instance[i].aid_id;
1355
1356		DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
1357				ring->use_doorbell?"true":"false");
1358
1359		/* doorbell size is 2 dwords, get DWORD offset */
1360		ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
1361		ring->vm_hub = AMDGPU_MMHUB0(aid_id);
1362
1363		sprintf(ring->name, "sdma%d.%d", aid_id,
1364				i % adev->sdma.num_inst_per_aid);
1365		r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
1366				     AMDGPU_SDMA_IRQ_INSTANCE0 + i,
1367				     AMDGPU_RING_PRIO_DEFAULT, NULL);
1368		if (r)
1369			return r;
1370
1371		if (adev->sdma.has_page_queue) {
1372			ring = &adev->sdma.instance[i].page;
1373			ring->ring_obj = NULL;
1374			ring->use_doorbell = true;
1375
1376			/* doorbell index of page queue is assigned right after
1377			 * gfx queue on the same instance
1378			 */
1379			ring->doorbell_index =
1380				(adev->doorbell_index.sdma_engine[i] + 1) << 1;
1381			ring->vm_hub = AMDGPU_MMHUB0(aid_id);
1382
1383			sprintf(ring->name, "page%d.%d", aid_id,
1384					i % adev->sdma.num_inst_per_aid);
1385			r = amdgpu_ring_init(adev, ring, 1024,
1386					     &adev->sdma.trap_irq,
1387					     AMDGPU_SDMA_IRQ_INSTANCE0 + i,
1388					     AMDGPU_RING_PRIO_DEFAULT, NULL);
1389			if (r)
1390				return r;
1391		}
1392	}
1393
1394	if (amdgpu_sdma_ras_sw_init(adev)) {
 1395		dev_err(adev->dev, "failed to initialize sdma ras block\n");
1396		return -EINVAL;
1397	}
1398
1399	return r;
1400}
1401
1402static int sdma_v4_4_2_sw_fini(void *handle)
1403{
1404	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1405	int i;
1406
1407	for (i = 0; i < adev->sdma.num_instances; i++) {
1408		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
1409		if (adev->sdma.has_page_queue)
1410			amdgpu_ring_fini(&adev->sdma.instance[i].page);
1411	}
1412
1413	if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 2))
1414		amdgpu_sdma_destroy_inst_ctx(adev, true);
1415	else
1416		amdgpu_sdma_destroy_inst_ctx(adev, false);
1417
1418	return 0;
1419}
1420
1421static int sdma_v4_4_2_hw_init(void *handle)
1422{
1423	int r;
1424	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1425	uint32_t inst_mask;
1426
1427	inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
1428	if (!amdgpu_sriov_vf(adev))
1429		sdma_v4_4_2_inst_init_golden_registers(adev, inst_mask);
1430
1431	r = sdma_v4_4_2_inst_start(adev, inst_mask);
1432
1433	return r;
1434}
1435
1436static int sdma_v4_4_2_hw_fini(void *handle)
1437{
1438	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1439	uint32_t inst_mask;
1440	int i;
1441
1442	if (amdgpu_sriov_vf(adev))
1443		return 0;
1444
1445	inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
1446	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
1447		for (i = 0; i < adev->sdma.num_instances; i++) {
1448			amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
1449				       AMDGPU_SDMA_IRQ_INSTANCE0 + i);
1450		}
1451	}
1452
1453	sdma_v4_4_2_inst_ctx_switch_enable(adev, false, inst_mask);
1454	sdma_v4_4_2_inst_enable(adev, false, inst_mask);
1455
1456	return 0;
1457}
1458
1459static int sdma_v4_4_2_set_clockgating_state(void *handle,
1460					     enum amd_clockgating_state state);
1461
1462static int sdma_v4_4_2_suspend(void *handle)
1463{
1464	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1465
1466	if (amdgpu_in_reset(adev))
1467		sdma_v4_4_2_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
1468
1469	return sdma_v4_4_2_hw_fini(adev);
1470}
1471
1472static int sdma_v4_4_2_resume(void *handle)
1473{
1474	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1475
1476	return sdma_v4_4_2_hw_init(adev);
1477}
1478
1479static bool sdma_v4_4_2_is_idle(void *handle)
1480{
1481	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1482	u32 i;
1483
1484	for (i = 0; i < adev->sdma.num_instances; i++) {
1485		u32 tmp = RREG32_SDMA(i, regSDMA_STATUS_REG);
1486
1487		if (!(tmp & SDMA_STATUS_REG__IDLE_MASK))
1488			return false;
1489	}
1490
1491	return true;
1492}
1493
1494static int sdma_v4_4_2_wait_for_idle(void *handle)
1495{
1496	unsigned i, j;
1497	u32 sdma[AMDGPU_MAX_SDMA_INSTANCES];
1498	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1499
1500	for (i = 0; i < adev->usec_timeout; i++) {
1501		for (j = 0; j < adev->sdma.num_instances; j++) {
1502			sdma[j] = RREG32_SDMA(j, regSDMA_STATUS_REG);
1503			if (!(sdma[j] & SDMA_STATUS_REG__IDLE_MASK))
1504				break;
1505		}
1506		if (j == adev->sdma.num_instances)
1507			return 0;
1508		udelay(1);
1509	}
1510	return -ETIMEDOUT;
1511}
1512
1513static int sdma_v4_4_2_soft_reset(void *handle)
1514{
1515	/* todo */
1516
1517	return 0;
1518}
1519
1520static int sdma_v4_4_2_set_trap_irq_state(struct amdgpu_device *adev,
1521					struct amdgpu_irq_src *source,
1522					unsigned type,
1523					enum amdgpu_interrupt_state state)
1524{
1525	u32 sdma_cntl;
1526
1527	sdma_cntl = RREG32_SDMA(type, regSDMA_CNTL);
1528	sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, TRAP_ENABLE,
1529		       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
1530	WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
1531
1532	return 0;
1533}
1534
1535static int sdma_v4_4_2_process_trap_irq(struct amdgpu_device *adev,
1536				      struct amdgpu_irq_src *source,
1537				      struct amdgpu_iv_entry *entry)
1538{
1539	uint32_t instance, i;
1540
1541	DRM_DEBUG("IH: SDMA trap\n");
1542	instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
1543
 1544	/* The client id identifies the SDMA instance within an AID; the IV entry's
 1545	 * node id identifies the AID itself. Match the node id against the AID id
 1546	 * associated with each SDMA instance to find the exact instance. */
1547	for (i = instance; i < adev->sdma.num_instances;
1548	     i += adev->sdma.num_inst_per_aid) {
1549		if (adev->sdma.instance[i].aid_id ==
1550		    node_id_to_phys_map[entry->node_id])
1551			break;
1552	}
1553
1554	if (i >= adev->sdma.num_instances) {
1555		dev_WARN_ONCE(
1556			adev->dev, 1,
1557			"Couldn't find the right sdma instance in trap handler");
1558		return 0;
1559	}
1560
1561	switch (entry->ring_id) {
1562	case 0:
1563		amdgpu_fence_process(&adev->sdma.instance[i].ring);
1564		break;
1565	default:
1566		break;
1567	}
1568	return 0;
1569}
1570
1571#if 0
1572static int sdma_v4_4_2_process_ras_data_cb(struct amdgpu_device *adev,
1573		void *err_data,
1574		struct amdgpu_iv_entry *entry)
1575{
1576	int instance;
1577
 1578	/* When "Full RAS" is enabled, the per-IP interrupt sources should
1579	 * be disabled and the driver should only look for the aggregated
1580	 * interrupt via sync flood
1581	 */
1582	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA))
1583		goto out;
1584
1585	instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
1586	if (instance < 0)
1587		goto out;
1588
1589	amdgpu_sdma_process_ras_data_cb(adev, err_data, entry);
1590
1591out:
1592	return AMDGPU_RAS_SUCCESS;
1593}
1594#endif
1595
1596static int sdma_v4_4_2_process_illegal_inst_irq(struct amdgpu_device *adev,
1597					      struct amdgpu_irq_src *source,
1598					      struct amdgpu_iv_entry *entry)
1599{
1600	int instance;
1601
1602	DRM_ERROR("Illegal instruction in SDMA command stream\n");
1603
1604	instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
1605	if (instance < 0)
1606		return 0;
1607
1608	switch (entry->ring_id) {
1609	case 0:
1610		drm_sched_fault(&adev->sdma.instance[instance].ring.sched);
1611		break;
1612	}
1613	return 0;
1614}
1615
1616static int sdma_v4_4_2_set_ecc_irq_state(struct amdgpu_device *adev,
1617					struct amdgpu_irq_src *source,
1618					unsigned type,
1619					enum amdgpu_interrupt_state state)
1620{
1621	u32 sdma_cntl;
1622
1623	sdma_cntl = RREG32_SDMA(type, regSDMA_CNTL);
1624	switch (state) {
1625	case AMDGPU_IRQ_STATE_DISABLE:
1626		sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL,
1627					  DRAM_ECC_INT_ENABLE, 0);
1628		WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
1629		break;
1630	/* sdma ecc interrupt is enabled by default
1631	 * driver doesn't need to do anything to
1632	 * enable the interrupt */
1633	case AMDGPU_IRQ_STATE_ENABLE:
1634	default:
1635		break;
1636	}
1637
1638	return 0;
1639}
1640
1641static int sdma_v4_4_2_print_iv_entry(struct amdgpu_device *adev,
1642					      struct amdgpu_iv_entry *entry)
1643{
1644	int instance;
1645	struct amdgpu_task_info task_info;
1646	u64 addr;
1647
1648	instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
1649	if (instance < 0 || instance >= adev->sdma.num_instances) {
1650		dev_err(adev->dev, "sdma instance invalid %d\n", instance);
1651		return -EINVAL;
1652	}
1653
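	/* Rebuild the page-aligned 48-bit fault address: src_data[0] holds bits 12..43, src_data[1] bits 44..47. */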
1654	addr = (u64)entry->src_data[0] << 12;
1655	addr |= ((u64)entry->src_data[1] & 0xf) << 44;
1656
1657	memset(&task_info, 0, sizeof(struct amdgpu_task_info));
1658	amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
1659
1660	dev_dbg_ratelimited(adev->dev,
1661		   "[sdma%d] address:0x%016llx src_id:%u ring:%u vmid:%u "
1662		   "pasid:%u, for process %s pid %d thread %s pid %d\n",
1663		   instance, addr, entry->src_id, entry->ring_id, entry->vmid,
1664		   entry->pasid, task_info.process_name, task_info.tgid,
1665		   task_info.task_name, task_info.pid);
1666	return 0;
1667}
1668
1669static int sdma_v4_4_2_process_vm_hole_irq(struct amdgpu_device *adev,
1670					      struct amdgpu_irq_src *source,
1671					      struct amdgpu_iv_entry *entry)
1672{
1673	dev_dbg_ratelimited(adev->dev, "MC or SEM address in VM hole\n");
1674	sdma_v4_4_2_print_iv_entry(adev, entry);
1675	return 0;
1676}
1677
1678static int sdma_v4_4_2_process_doorbell_invalid_irq(struct amdgpu_device *adev,
1679					      struct amdgpu_irq_src *source,
1680					      struct amdgpu_iv_entry *entry)
1681{
1682
1683	dev_dbg_ratelimited(adev->dev, "SDMA received a doorbell from BIF with byte_enable !=0xff\n");
1684	sdma_v4_4_2_print_iv_entry(adev, entry);
1685	return 0;
1686}
1687
1688static int sdma_v4_4_2_process_pool_timeout_irq(struct amdgpu_device *adev,
1689					      struct amdgpu_irq_src *source,
1690					      struct amdgpu_iv_entry *entry)
1691{
1692	dev_dbg_ratelimited(adev->dev,
1693		"Polling register/memory timeout executing POLL_REG/MEM with finite timer\n");
1694	sdma_v4_4_2_print_iv_entry(adev, entry);
1695	return 0;
1696}
1697
1698static int sdma_v4_4_2_process_srbm_write_irq(struct amdgpu_device *adev,
1699					      struct amdgpu_irq_src *source,
1700					      struct amdgpu_iv_entry *entry)
1701{
1702	dev_dbg_ratelimited(adev->dev,
 1703		"SDMA got an SRBM_WRITE register write command in a non-privileged command buffer\n");
1704	sdma_v4_4_2_print_iv_entry(adev, entry);
1705	return 0;
1706}
1707
1708static void sdma_v4_4_2_inst_update_medium_grain_light_sleep(
1709	struct amdgpu_device *adev, bool enable, uint32_t inst_mask)
1710{
1711	uint32_t data, def;
1712	int i;
1713
1714	/* leave as default if it is not driver controlled */
1715	if (!(adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS))
1716		return;
1717
1718	if (enable) {
1719		for_each_inst(i, inst_mask) {
1720			/* 1-not override: enable sdma mem light sleep */
1721			def = data = RREG32_SDMA(i, regSDMA_POWER_CNTL);
1722			data |= SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1723			if (def != data)
1724				WREG32_SDMA(i, regSDMA_POWER_CNTL, data);
1725		}
1726	} else {
1727		for_each_inst(i, inst_mask) {
1728			/* 0-override:disable sdma mem light sleep */
1729			def = data = RREG32_SDMA(i, regSDMA_POWER_CNTL);
1730			data &= ~SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1731			if (def != data)
1732				WREG32_SDMA(i, regSDMA_POWER_CNTL, data);
1733		}
1734	}
1735}
1736
1737static void sdma_v4_4_2_inst_update_medium_grain_clock_gating(
1738	struct amdgpu_device *adev, bool enable, uint32_t inst_mask)
1739{
1740	uint32_t data, def;
1741	int i;
1742
1743	/* leave as default if it is not driver controlled */
1744	if (!(adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG))
1745		return;
1746
1747	if (enable) {
1748		for_each_inst(i, inst_mask) {
1749			def = data = RREG32_SDMA(i, regSDMA_CLK_CTRL);
1750			data &= ~(SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK |
1751				  SDMA_CLK_CTRL__SOFT_OVERRIDE4_MASK |
1752				  SDMA_CLK_CTRL__SOFT_OVERRIDE3_MASK |
1753				  SDMA_CLK_CTRL__SOFT_OVERRIDE2_MASK |
1754				  SDMA_CLK_CTRL__SOFT_OVERRIDE1_MASK |
1755				  SDMA_CLK_CTRL__SOFT_OVERRIDE0_MASK);
1756			if (def != data)
1757				WREG32_SDMA(i, regSDMA_CLK_CTRL, data);
1758		}
1759	} else {
1760		for_each_inst(i, inst_mask) {
1761			def = data = RREG32_SDMA(i, regSDMA_CLK_CTRL);
1762			data |= (SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK |
1763				 SDMA_CLK_CTRL__SOFT_OVERRIDE4_MASK |
1764				 SDMA_CLK_CTRL__SOFT_OVERRIDE3_MASK |
1765				 SDMA_CLK_CTRL__SOFT_OVERRIDE2_MASK |
1766				 SDMA_CLK_CTRL__SOFT_OVERRIDE1_MASK |
1767				 SDMA_CLK_CTRL__SOFT_OVERRIDE0_MASK);
1768			if (def != data)
1769				WREG32_SDMA(i, regSDMA_CLK_CTRL, data);
1770		}
1771	}
1772}
1773
1774static int sdma_v4_4_2_set_clockgating_state(void *handle,
1775					  enum amd_clockgating_state state)
1776{
1777	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1778	uint32_t inst_mask;
1779
1780	if (amdgpu_sriov_vf(adev))
1781		return 0;
1782
1783	inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
1784
1785	sdma_v4_4_2_inst_update_medium_grain_clock_gating(
1786		adev, state == AMD_CG_STATE_GATE, inst_mask);
1787	sdma_v4_4_2_inst_update_medium_grain_light_sleep(
1788		adev, state == AMD_CG_STATE_GATE, inst_mask);
1789	return 0;
1790}
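/*
 * Illustrative example: with adev->sdma.num_instances = 4,
 * GENMASK(3, 0) = 0xF, so both medium-grain clock gating and light sleep
 * are updated on all four SDMA engines in a single call.
 */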
1791
1792static int sdma_v4_4_2_set_powergating_state(void *handle,
1793					  enum amd_powergating_state state)
1794{
1795	return 0;
1796}
1797
1798static void sdma_v4_4_2_get_clockgating_state(void *handle, u64 *flags)
1799{
1800	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1801	int data;
1802
1803	if (amdgpu_sriov_vf(adev))
1804		*flags = 0;
1805
1806	/* AMD_CG_SUPPORT_SDMA_MGCG */
1807	data = RREG32(SOC15_REG_OFFSET(SDMA0, GET_INST(SDMA0, 0), regSDMA_CLK_CTRL));
1808	if (!(data & SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK))
1809		*flags |= AMD_CG_SUPPORT_SDMA_MGCG;
1810
1811	/* AMD_CG_SUPPORT_SDMA_LS */
1812	data = RREG32(SOC15_REG_OFFSET(SDMA0, GET_INST(SDMA0, 0), regSDMA_POWER_CNTL));
1813	if (data & SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
1814		*flags |= AMD_CG_SUPPORT_SDMA_LS;
1815}
1816
1817const struct amd_ip_funcs sdma_v4_4_2_ip_funcs = {
1818	.name = "sdma_v4_4_2",
1819	.early_init = sdma_v4_4_2_early_init,
1820	.late_init = sdma_v4_4_2_late_init,
1821	.sw_init = sdma_v4_4_2_sw_init,
1822	.sw_fini = sdma_v4_4_2_sw_fini,
1823	.hw_init = sdma_v4_4_2_hw_init,
1824	.hw_fini = sdma_v4_4_2_hw_fini,
1825	.suspend = sdma_v4_4_2_suspend,
1826	.resume = sdma_v4_4_2_resume,
1827	.is_idle = sdma_v4_4_2_is_idle,
1828	.wait_for_idle = sdma_v4_4_2_wait_for_idle,
1829	.soft_reset = sdma_v4_4_2_soft_reset,
1830	.set_clockgating_state = sdma_v4_4_2_set_clockgating_state,
1831	.set_powergating_state = sdma_v4_4_2_set_powergating_state,
1832	.get_clockgating_state = sdma_v4_4_2_get_clockgating_state,
1833};
1834
1835static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = {
1836	.type = AMDGPU_RING_TYPE_SDMA,
1837	.align_mask = 0xff,
1838	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
1839	.support_64bit_ptrs = true,
1840	.get_rptr = sdma_v4_4_2_ring_get_rptr,
1841	.get_wptr = sdma_v4_4_2_ring_get_wptr,
1842	.set_wptr = sdma_v4_4_2_ring_set_wptr,
1843	.emit_frame_size =
1844		6 + /* sdma_v4_4_2_ring_emit_hdp_flush */
1845		3 + /* hdp invalidate */
1846		6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */
1847		/* sdma_v4_4_2_ring_emit_vm_flush */
1848		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1849		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
1850		10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */
1851	.emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */
1852	.emit_ib = sdma_v4_4_2_ring_emit_ib,
1853	.emit_fence = sdma_v4_4_2_ring_emit_fence,
1854	.emit_pipeline_sync = sdma_v4_4_2_ring_emit_pipeline_sync,
1855	.emit_vm_flush = sdma_v4_4_2_ring_emit_vm_flush,
1856	.emit_hdp_flush = sdma_v4_4_2_ring_emit_hdp_flush,
1857	.test_ring = sdma_v4_4_2_ring_test_ring,
1858	.test_ib = sdma_v4_4_2_ring_test_ib,
1859	.insert_nop = sdma_v4_4_2_ring_insert_nop,
1860	.pad_ib = sdma_v4_4_2_ring_pad_ib,
1861	.emit_wreg = sdma_v4_4_2_ring_emit_wreg,
1862	.emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
1863	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1864};
1865
1866static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
1867	.type = AMDGPU_RING_TYPE_SDMA,
1868	.align_mask = 0xff,
1869	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
1870	.support_64bit_ptrs = true,
1871	.get_rptr = sdma_v4_4_2_ring_get_rptr,
1872	.get_wptr = sdma_v4_4_2_page_ring_get_wptr,
1873	.set_wptr = sdma_v4_4_2_page_ring_set_wptr,
1874	.emit_frame_size =
1875		6 + /* sdma_v4_4_2_ring_emit_hdp_flush */
1876		3 + /* hdp invalidate */
1877		6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */
1878		/* sdma_v4_4_2_ring_emit_vm_flush */
1879		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1880		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
1881		10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */
1882	.emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */
1883	.emit_ib = sdma_v4_4_2_ring_emit_ib,
1884	.emit_fence = sdma_v4_4_2_ring_emit_fence,
1885	.emit_pipeline_sync = sdma_v4_4_2_ring_emit_pipeline_sync,
1886	.emit_vm_flush = sdma_v4_4_2_ring_emit_vm_flush,
1887	.emit_hdp_flush = sdma_v4_4_2_ring_emit_hdp_flush,
1888	.test_ring = sdma_v4_4_2_ring_test_ring,
1889	.test_ib = sdma_v4_4_2_ring_test_ib,
1890	.insert_nop = sdma_v4_4_2_ring_insert_nop,
1891	.pad_ib = sdma_v4_4_2_ring_pad_ib,
1892	.emit_wreg = sdma_v4_4_2_ring_emit_wreg,
1893	.emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
1894	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1895};
1896
1897static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev)
1898{
1899	int i, dev_inst;
1900
1901	for (i = 0; i < adev->sdma.num_instances; i++) {
1902		adev->sdma.instance[i].ring.funcs = &sdma_v4_4_2_ring_funcs;
1903		adev->sdma.instance[i].ring.me = i;
1904		if (adev->sdma.has_page_queue) {
1905			adev->sdma.instance[i].page.funcs =
1906				&sdma_v4_4_2_page_ring_funcs;
1907			adev->sdma.instance[i].page.me = i;
1908		}
1909
1910		dev_inst = GET_INST(SDMA0, i);
1911		/* AID to which SDMA belongs depends on physical instance */
1912		adev->sdma.instance[i].aid_id =
1913			dev_inst / adev->sdma.num_inst_per_aid;
1914	}
1915}
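/*
 * Illustrative example of the aid_id mapping above: assuming four SDMA
 * instances per AID (num_inst_per_aid = 4), physical instance 5 yields
 * aid_id = 5 / 4 = 1, i.e. the second AID on the package.
 */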
1916
1917static const struct amdgpu_irq_src_funcs sdma_v4_4_2_trap_irq_funcs = {
1918	.set = sdma_v4_4_2_set_trap_irq_state,
1919	.process = sdma_v4_4_2_process_trap_irq,
1920};
1921
1922static const struct amdgpu_irq_src_funcs sdma_v4_4_2_illegal_inst_irq_funcs = {
1923	.process = sdma_v4_4_2_process_illegal_inst_irq,
1924};
1925
1926static const struct amdgpu_irq_src_funcs sdma_v4_4_2_ecc_irq_funcs = {
1927	.set = sdma_v4_4_2_set_ecc_irq_state,
1928	.process = amdgpu_sdma_process_ecc_irq,
1929};
1930
1931static const struct amdgpu_irq_src_funcs sdma_v4_4_2_vm_hole_irq_funcs = {
1932	.process = sdma_v4_4_2_process_vm_hole_irq,
1933};
1934
1935static const struct amdgpu_irq_src_funcs sdma_v4_4_2_doorbell_invalid_irq_funcs = {
1936	.process = sdma_v4_4_2_process_doorbell_invalid_irq,
1937};
1938
1939static const struct amdgpu_irq_src_funcs sdma_v4_4_2_pool_timeout_irq_funcs = {
1940	.process = sdma_v4_4_2_process_pool_timeout_irq,
1941};
1942
1943static const struct amdgpu_irq_src_funcs sdma_v4_4_2_srbm_write_irq_funcs = {
1944	.process = sdma_v4_4_2_process_srbm_write_irq,
1945};
1946
1947static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev)
1948{
1949	adev->sdma.trap_irq.num_types = adev->sdma.num_instances;
1950	adev->sdma.ecc_irq.num_types = adev->sdma.num_instances;
1951	adev->sdma.vm_hole_irq.num_types = adev->sdma.num_instances;
1952	adev->sdma.doorbell_invalid_irq.num_types = adev->sdma.num_instances;
1953	adev->sdma.pool_timeout_irq.num_types = adev->sdma.num_instances;
1954	adev->sdma.srbm_write_irq.num_types = adev->sdma.num_instances;
1955
1956	adev->sdma.trap_irq.funcs = &sdma_v4_4_2_trap_irq_funcs;
1957	adev->sdma.illegal_inst_irq.funcs = &sdma_v4_4_2_illegal_inst_irq_funcs;
1958	adev->sdma.ecc_irq.funcs = &sdma_v4_4_2_ecc_irq_funcs;
1959	adev->sdma.vm_hole_irq.funcs = &sdma_v4_4_2_vm_hole_irq_funcs;
1960	adev->sdma.doorbell_invalid_irq.funcs = &sdma_v4_4_2_doorbell_invalid_irq_funcs;
1961	adev->sdma.pool_timeout_irq.funcs = &sdma_v4_4_2_pool_timeout_irq_funcs;
1962	adev->sdma.srbm_write_irq.funcs = &sdma_v4_4_2_srbm_write_irq_funcs;
1963}
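/*
 * Note: the trap, ECC, VM-hole, doorbell-invalid, pool-timeout and
 * SRBM-write sources advertise one interrupt type per SDMA instance, so
 * callers can target a specific engine via AMDGPU_SDMA_IRQ_INSTANCE0 + i;
 * sources registered without a .set hook (e.g. illegal instruction) are
 * process-only.
 */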
1964
1965/**
1966 * sdma_v4_4_2_emit_copy_buffer - copy buffer using the sDMA engine
1967 *
1968 * @ib: indirect buffer to copy to
1969 * @src_offset: src GPU address
1970 * @dst_offset: dst GPU address
1971 * @byte_count: number of bytes to xfer
1972 * @tmz: if a secure copy should be used
1973 *
1974 * Copy GPU buffers using the DMA engine.
1975 * Used by the amdgpu ttm implementation to move pages if
1976 * registered as the asic copy callback.
1977 */
1978static void sdma_v4_4_2_emit_copy_buffer(struct amdgpu_ib *ib,
1979				       uint64_t src_offset,
1980				       uint64_t dst_offset,
1981				       uint32_t byte_count,
1982				       bool tmz)
1983{
1984	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
1985		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
1986		SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
1987	ib->ptr[ib->length_dw++] = byte_count - 1;
1988	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1989	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
1990	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
1991	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1992	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1993}
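/*
 * Illustrative packet layout for the copy above, e.g. a 4 KiB non-secure
 * copy (byte_count = 0x1000, tmz = false):
 *   DW0: OP = COPY, SUB_OP = COPY_LINEAR, TMZ = 0
 *   DW1: 0x00000fff  (byte_count - 1)
 *   DW2: 0x00000000  (no src/dst endian swap)
 *   DW3: lower_32_bits(src_offset)
 *   DW4: upper_32_bits(src_offset)
 *   DW5: lower_32_bits(dst_offset)
 *   DW6: upper_32_bits(dst_offset)
 * Seven dwords total, matching copy_num_dw in sdma_v4_4_2_buffer_funcs below.
 */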
1994
1995/**
1996 * sdma_v4_4_2_emit_fill_buffer - fill buffer using the sDMA engine
1997 *
1998 * @ib: indirect buffer to append the fill packet to
1999 * @src_data: value to write to buffer
2000 * @dst_offset: dst GPU address
2001 * @byte_count: number of bytes to fill
2002 *
2003 * Fill GPU buffers using the DMA engine.
2004 */
2005static void sdma_v4_4_2_emit_fill_buffer(struct amdgpu_ib *ib,
2006				       uint32_t src_data,
2007				       uint64_t dst_offset,
2008				       uint32_t byte_count)
2009{
2010	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
2011	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
2012	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
2013	ib->ptr[ib->length_dw++] = src_data;
2014	ib->ptr[ib->length_dw++] = byte_count - 1;
2015}
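/*
 * Illustrative packet layout for the fill above, e.g. zeroing 4 KiB
 * (src_data = 0, byte_count = 0x1000):
 *   DW0: OP = CONST_FILL
 *   DW1: lower_32_bits(dst_offset)
 *   DW2: upper_32_bits(dst_offset)
 *   DW3: 0x00000000  (src_data)
 *   DW4: 0x00000fff  (byte_count - 1)
 * Five dwords total, matching fill_num_dw in sdma_v4_4_2_buffer_funcs below.
 */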
2016
2017static const struct amdgpu_buffer_funcs sdma_v4_4_2_buffer_funcs = {
2018	.copy_max_bytes = 0x400000,
2019	.copy_num_dw = 7,
2020	.emit_copy_buffer = sdma_v4_4_2_emit_copy_buffer,
2021
2022	.fill_max_bytes = 0x400000,
2023	.fill_num_dw = 5,
2024	.emit_fill_buffer = sdma_v4_4_2_emit_fill_buffer,
2025};
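/*
 * Note: copy_max_bytes/fill_max_bytes cap a single packet at 0x400000
 * bytes (4 MiB); the generic buffer-move helpers are expected to split
 * larger transfers into multiple packets of copy_num_dw/fill_num_dw
 * dwords each.
 */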
2026
2027static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev)
2028{
2029	adev->mman.buffer_funcs = &sdma_v4_4_2_buffer_funcs;
2030	if (adev->sdma.has_page_queue)
2031		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page;
2032	else
2033		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
2034}
2035
2036static const struct amdgpu_vm_pte_funcs sdma_v4_4_2_vm_pte_funcs = {
2037	.copy_pte_num_dw = 7,
2038	.copy_pte = sdma_v4_4_2_vm_copy_pte,
2039
2040	.write_pte = sdma_v4_4_2_vm_write_pte,
2041	.set_pte_pde = sdma_v4_4_2_vm_set_pte_pde,
2042};
2043
2044static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev)
2045{
2046	struct drm_gpu_scheduler *sched;
2047	unsigned i;
2048
2049	adev->vm_manager.vm_pte_funcs = &sdma_v4_4_2_vm_pte_funcs;
2050	for (i = 0; i < adev->sdma.num_instances; i++) {
2051		if (adev->sdma.has_page_queue)
2052			sched = &adev->sdma.instance[i].page.sched;
2053		else
2054			sched = &adev->sdma.instance[i].ring.sched;
2055		adev->vm_manager.vm_pte_scheds[i] = sched;
2056	}
2057	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
2058}
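/*
 * Design note: when a page queue is available, page-table updates are
 * scheduled on the page rings so the main SDMA rings remain free for
 * general buffer traffic; otherwise the main rings carry both workloads.
 */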
2059
2060const struct amdgpu_ip_block_version sdma_v4_4_2_ip_block = {
2061	.type = AMD_IP_BLOCK_TYPE_SDMA,
2062	.major = 4,
2063	.minor = 4,
2064	.rev = 2,
2065	.funcs = &sdma_v4_4_2_ip_funcs,
2066};
2067
2068static int sdma_v4_4_2_xcp_resume(void *handle, uint32_t inst_mask)
2069{
2070	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2071	int r;
2072
2073	if (!amdgpu_sriov_vf(adev))
2074		sdma_v4_4_2_inst_init_golden_registers(adev, inst_mask);
2075
2076	r = sdma_v4_4_2_inst_start(adev, inst_mask);
2077
2078	return r;
2079}
2080
2081static int sdma_v4_4_2_xcp_suspend(void *handle, uint32_t inst_mask)
2082{
2083	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2084	uint32_t tmp_mask = inst_mask;
2085	int i;
2086
2087	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
2088		for_each_inst(i, tmp_mask) {
2089			amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
2090				       AMDGPU_SDMA_IRQ_INSTANCE0 + i);
2091		}
2092	}
2093
2094	sdma_v4_4_2_inst_ctx_switch_enable(adev, false, inst_mask);
2095	sdma_v4_4_2_inst_enable(adev, false, inst_mask);
2096
2097	return 0;
2098}
2099
2100struct amdgpu_xcp_ip_funcs sdma_v4_4_2_xcp_funcs = {
2101	.suspend = &sdma_v4_4_2_xcp_suspend,
2102	.resume = &sdma_v4_4_2_xcp_resume
2103};
2104
2105static const struct amdgpu_ras_err_status_reg_entry sdma_v4_2_2_ue_reg_list[] = {
2106	{AMDGPU_RAS_REG_ENTRY(SDMA0, 0, regSDMA_UE_ERR_STATUS_LO, regSDMA_UE_ERR_STATUS_HI),
2107	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SDMA"},
2108};
2109
2110static const struct amdgpu_ras_memory_id_entry sdma_v4_4_2_ras_memory_list[] = {
2111	{AMDGPU_SDMA_MBANK_DATA_BUF0, "SDMA_MBANK_DATA_BUF0"},
2112	{AMDGPU_SDMA_MBANK_DATA_BUF1, "SDMA_MBANK_DATA_BUF1"},
2113	{AMDGPU_SDMA_MBANK_DATA_BUF2, "SDMA_MBANK_DATA_BUF2"},
2114	{AMDGPU_SDMA_MBANK_DATA_BUF3, "SDMA_MBANK_DATA_BUF3"},
2115	{AMDGPU_SDMA_MBANK_DATA_BUF4, "SDMA_MBANK_DATA_BUF4"},
2116	{AMDGPU_SDMA_MBANK_DATA_BUF5, "SDMA_MBANK_DATA_BUF5"},
2117	{AMDGPU_SDMA_MBANK_DATA_BUF6, "SDMA_MBANK_DATA_BUF6"},
2118	{AMDGPU_SDMA_MBANK_DATA_BUF7, "SDMA_MBANK_DATA_BUF7"},
2119	{AMDGPU_SDMA_MBANK_DATA_BUF8, "SDMA_MBANK_DATA_BUF8"},
2120	{AMDGPU_SDMA_MBANK_DATA_BUF9, "SDMA_MBANK_DATA_BUF9"},
2121	{AMDGPU_SDMA_MBANK_DATA_BUF10, "SDMA_MBANK_DATA_BUF10"},
2122	{AMDGPU_SDMA_MBANK_DATA_BUF11, "SDMA_MBANK_DATA_BUF11"},
2123	{AMDGPU_SDMA_MBANK_DATA_BUF12, "SDMA_MBANK_DATA_BUF12"},
2124	{AMDGPU_SDMA_MBANK_DATA_BUF13, "SDMA_MBANK_DATA_BUF13"},
2125	{AMDGPU_SDMA_MBANK_DATA_BUF14, "SDMA_MBANK_DATA_BUF14"},
2126	{AMDGPU_SDMA_MBANK_DATA_BUF15, "SDMA_MBANK_DATA_BUF15"},
2127	{AMDGPU_SDMA_UCODE_BUF, "SDMA_UCODE_BUF"},
2128	{AMDGPU_SDMA_RB_CMD_BUF, "SDMA_RB_CMD_BUF"},
2129	{AMDGPU_SDMA_IB_CMD_BUF, "SDMA_IB_CMD_BUF"},
2130	{AMDGPU_SDMA_UTCL1_RD_FIFO, "SDMA_UTCL1_RD_FIFO"},
2131	{AMDGPU_SDMA_UTCL1_RDBST_FIFO, "SDMA_UTCL1_RDBST_FIFO"},
2132	{AMDGPU_SDMA_UTCL1_WR_FIFO, "SDMA_UTCL1_WR_FIFO"},
2133	{AMDGPU_SDMA_DATA_LUT_FIFO, "SDMA_DATA_LUT_FIFO"},
2134	{AMDGPU_SDMA_SPLIT_DAT_BUF, "SDMA_SPLIT_DAT_BUF"},
2135};
2136
2137static void sdma_v4_4_2_inst_query_ras_error_count(struct amdgpu_device *adev,
2138						   uint32_t sdma_inst,
2139						   void *ras_err_status)
2140{
2141	struct ras_err_data *err_data = (struct ras_err_data *)ras_err_status;
2142	uint32_t sdma_dev_inst = GET_INST(SDMA0, sdma_inst);
2143	unsigned long ue_count = 0;
2144	struct amdgpu_smuio_mcm_config_info mcm_info = {
2145		.socket_id = adev->smuio.funcs->get_socket_id(adev),
2146		.die_id = adev->sdma.instance[sdma_inst].aid_id,
2147	};
2148
2149	/* sdma v4_4_2 doesn't support querying CE counts */
2150	amdgpu_ras_inst_query_ras_error_count(adev,
2151					sdma_v4_2_2_ue_reg_list,
2152					ARRAY_SIZE(sdma_v4_2_2_ue_reg_list),
2153					sdma_v4_4_2_ras_memory_list,
2154					ARRAY_SIZE(sdma_v4_4_2_ras_memory_list),
2155					sdma_dev_inst,
2156					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
2157					&ue_count);
2158
2159	amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
2160}
2161
2162static void sdma_v4_4_2_query_ras_error_count(struct amdgpu_device *adev,
2163					      void *ras_err_status)
2164{
2165	uint32_t inst_mask;
2166	int i = 0;
2167
2168	inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
2169	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
2170		for_each_inst(i, inst_mask)
2171			sdma_v4_4_2_inst_query_ras_error_count(adev, i, ras_err_status);
2172	} else {
2173		dev_warn(adev->dev, "SDMA RAS is not supported\n");
2174	}
2175}
2176
2177static void sdma_v4_4_2_inst_reset_ras_error_count(struct amdgpu_device *adev,
2178						   uint32_t sdma_inst)
2179{
2180	uint32_t sdma_dev_inst = GET_INST(SDMA0, sdma_inst);
2181
2182	amdgpu_ras_inst_reset_ras_error_count(adev,
2183					sdma_v4_2_2_ue_reg_list,
2184					ARRAY_SIZE(sdma_v4_2_2_ue_reg_list),
2185					sdma_dev_inst);
2186}
2187
2188static void sdma_v4_4_2_reset_ras_error_count(struct amdgpu_device *adev)
2189{
2190	uint32_t inst_mask;
2191	int i = 0;
2192
2193	inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
2194	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
2195		for_each_inst(i, inst_mask)
2196			sdma_v4_4_2_inst_reset_ras_error_count(adev, i);
2197	} else {
2198		dev_warn(adev->dev, "SDMA RAS is not supported\n");
2199	}
2200}
2201
2202static const struct amdgpu_ras_block_hw_ops sdma_v4_4_2_ras_hw_ops = {
2203	.query_ras_error_count = sdma_v4_4_2_query_ras_error_count,
2204	.reset_ras_error_count = sdma_v4_4_2_reset_ras_error_count,
2205};
2206
2207static struct amdgpu_sdma_ras sdma_v4_4_2_ras = {
2208	.ras_block = {
2209		.hw_ops = &sdma_v4_4_2_ras_hw_ops,
2210	},
2211};
2212
2213static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev)
2214{
2215	adev->sdma.ras = &sdma_v4_4_2_ras;
2216}