   1/*
   2 * Copyright 2014 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 * Authors: Alex Deucher
  23 */
  24#include <linux/firmware.h>
  25#include <drm/drmP.h>
  26#include "amdgpu.h"
  27#include "amdgpu_ucode.h"
  28#include "amdgpu_trace.h"
  29#include "vi.h"
  30#include "vid.h"
  31
  32#include "oss/oss_2_4_d.h"
  33#include "oss/oss_2_4_sh_mask.h"
  34
  35#include "gmc/gmc_7_1_d.h"
  36#include "gmc/gmc_7_1_sh_mask.h"
  37
  38#include "gca/gfx_8_0_d.h"
  39#include "gca/gfx_8_0_enum.h"
  40#include "gca/gfx_8_0_sh_mask.h"
  41
  42#include "bif/bif_5_0_d.h"
  43#include "bif/bif_5_0_sh_mask.h"
  44
  45#include "iceland_sdma_pkt_open.h"
  46
  47static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev);
  48static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev);
  49static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev);
  50static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev);
  51
  52MODULE_FIRMWARE("amdgpu/topaz_sdma.bin");
  53MODULE_FIRMWARE("amdgpu/topaz_sdma1.bin");
  54
  55static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
  56{
  57	SDMA0_REGISTER_OFFSET,
  58	SDMA1_REGISTER_OFFSET
  59};
  60
  61static const u32 golden_settings_iceland_a11[] =
  62{
  63	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
  64	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
  65	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
  66	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
  67};
  68
  69static const u32 iceland_mgcg_cgcg_init[] =
  70{
  71	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
  72	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
  73};
  74
  75/*
  76 * sDMA - System DMA
  77 * Starting with CIK, the GPU has new asynchronous
  78 * DMA engines.  These engines are used for compute
  79 * and gfx.  There are two DMA engines (SDMA0, SDMA1)
  80 * and each one supports 1 ring buffer used for gfx
  81 * and 2 queues used for compute.
  82 *
  83 * The programming model is very similar to the CP
   84 * (ring buffer, IBs, etc.), but sDMA has its own
  85 * packet format that is different from the PM4 format
  86 * used by the CP. sDMA supports copying data, writing
  87 * embedded data, solid fills, and a number of other
  88 * things.  It also has support for tiling/detiling of
  89 * buffers.
  90 */
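/*
 * Illustrative sketch only (modelled on sdma_v2_4_ring_test_ring() below):
 * a minimal linear-write packet is assembled dword by dword, roughly
 *
 *   SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
 *       SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)  - opcode/sub-opcode
 *   lower_32_bits(gpu_addr)                               - destination low
 *   upper_32_bits(gpu_addr)                               - destination high
 *   SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)                  - dword count field
 *   0xDEADBEEF                                            - payload
 *
 * and every ring/IB packet in this file is built from such header + operand
 * dwords.
 */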
  91
  92static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
  93{
  94	switch (adev->asic_type) {
  95	case CHIP_TOPAZ:
  96		amdgpu_device_program_register_sequence(adev,
  97							iceland_mgcg_cgcg_init,
  98							ARRAY_SIZE(iceland_mgcg_cgcg_init));
  99		amdgpu_device_program_register_sequence(adev,
 100							golden_settings_iceland_a11,
 101							ARRAY_SIZE(golden_settings_iceland_a11));
 102		break;
 103	default:
 104		break;
 105	}
 106}
 107
 108static void sdma_v2_4_free_microcode(struct amdgpu_device *adev)
 109{
 110	int i;
 111	for (i = 0; i < adev->sdma.num_instances; i++) {
 112		release_firmware(adev->sdma.instance[i].fw);
 113		adev->sdma.instance[i].fw = NULL;
 114	}
 115}
 116
 117/**
 118 * sdma_v2_4_init_microcode - load ucode images from disk
 119 *
 120 * @adev: amdgpu_device pointer
 121 *
 122 * Use the firmware interface to load the ucode images into
 123 * the driver (not loaded into hw).
 124 * Returns 0 on success, error on failure.
 125 */
 126static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
 127{
 128	const char *chip_name;
 129	char fw_name[30];
 130	int err = 0, i;
 131	struct amdgpu_firmware_info *info = NULL;
 132	const struct common_firmware_header *header = NULL;
 133	const struct sdma_firmware_header_v1_0 *hdr;
 134
 135	DRM_DEBUG("\n");
 136
 137	switch (adev->asic_type) {
 138	case CHIP_TOPAZ:
 139		chip_name = "topaz";
 140		break;
 141	default: BUG();
 142	}
 143
 144	for (i = 0; i < adev->sdma.num_instances; i++) {
 145		if (i == 0)
 146			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
 147		else
 148			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
 149		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
 150		if (err)
 151			goto out;
 152		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
 153		if (err)
 154			goto out;
 155		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
 156		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
 157		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
 158		if (adev->sdma.instance[i].feature_version >= 20)
 159			adev->sdma.instance[i].burst_nop = true;
 160
 161		if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
 162			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
 163			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
 164			info->fw = adev->sdma.instance[i].fw;
 165			header = (const struct common_firmware_header *)info->fw->data;
 166			adev->firmware.fw_size +=
 167				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 168		}
 169	}
 170
 171out:
 172	if (err) {
  173		pr_err("sdma_v2_4: Failed to load firmware \"%s\"\n", fw_name);
 174		for (i = 0; i < adev->sdma.num_instances; i++) {
 175			release_firmware(adev->sdma.instance[i].fw);
 176			adev->sdma.instance[i].fw = NULL;
 177		}
 178	}
 179	return err;
 180}
 181
 182/**
 183 * sdma_v2_4_ring_get_rptr - get the current read pointer
 184 *
 185 * @ring: amdgpu ring pointer
 186 *
 187 * Get the current rptr from the hardware (VI+).
 188 */
 189static uint64_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
  190{
  191	/* XXX check if swapping is necessary on BE */
  192	return ring->adev->wb.wb[ring->rptr_offs] >> 2;
 193}
 194
 195/**
 196 * sdma_v2_4_ring_get_wptr - get the current write pointer
 197 *
 198 * @ring: amdgpu ring pointer
 199 *
 200 * Get the current wptr from the hardware (VI+).
 201 */
 202static uint64_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
 203{
 204	struct amdgpu_device *adev = ring->adev;
 205	int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
 206	u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
 207
 208	return wptr;
 209}
 210
 211/**
 212 * sdma_v2_4_ring_set_wptr - commit the write pointer
 213 *
 214 * @ring: amdgpu ring pointer
 215 *
 216 * Write the wptr back to the hardware (VI+).
 217 */
 218static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
 219{
 220	struct amdgpu_device *adev = ring->adev;
 221	int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
 222
 223	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], lower_32_bits(ring->wptr) << 2);
 224}
 225
 226static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 227{
 228	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
 229	int i;
 230
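	/* With burst-NOP capable firmware, the first NOP header carries a
	 * count so the engine skips the remaining padding in one packet.
	 */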
 231	for (i = 0; i < count; i++)
 232		if (sdma && sdma->burst_nop && (i == 0))
 233			amdgpu_ring_write(ring, ring->funcs->nop |
 234				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
 235		else
 236			amdgpu_ring_write(ring, ring->funcs->nop);
 237}
 238
 239/**
 240 * sdma_v2_4_ring_emit_ib - Schedule an IB on the DMA engine
 241 *
 242 * @ring: amdgpu ring pointer
 243 * @ib: IB object to schedule
 244 *
 245 * Schedule an IB in the DMA ring (VI).
 246 */
 247static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
 248				   struct amdgpu_ib *ib,
 249				   unsigned vmid, bool ctx_switch)
  250{
  251	/* IB packet must end on an 8 DW boundary */
 252	sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
 253
 254	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
 255			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
 256	/* base must be 32 byte aligned */
 257	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
 258	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
 259	amdgpu_ring_write(ring, ib->length_dw);
 260	amdgpu_ring_write(ring, 0);
 261	amdgpu_ring_write(ring, 0);
 262
 263}
 264
 265/**
  266 * sdma_v2_4_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 267 *
 268 * @ring: amdgpu ring pointer
 269 *
 270 * Emit an hdp flush packet on the requested DMA ring.
 271 */
 272static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 273{
 274	u32 ref_and_mask = 0;
 275
 276	if (ring == &ring->adev->sdma.instance[0].ring)
 277		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
 278	else
 279		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
 280
 281	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
 282			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
 283			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
 284	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
 285	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
 286	amdgpu_ring_write(ring, ref_and_mask); /* reference */
 287	amdgpu_ring_write(ring, ref_and_mask); /* mask */
 288	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
 289			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
 290}
 291
 
 
 
 
 
 
 
 292/**
 293 * sdma_v2_4_ring_emit_fence - emit a fence on the DMA ring
 294 *
 295 * @ring: amdgpu ring pointer
 296 * @fence: amdgpu fence object
 297 *
 298 * Add a DMA fence packet to the ring to write
 299 * the fence seq number and DMA trap packet to generate
 300 * an interrupt if needed (VI).
 301 */
 302static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
 303				      unsigned flags)
 304{
 305	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
 306	/* write the fence */
 307	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
 308	amdgpu_ring_write(ring, lower_32_bits(addr));
 309	amdgpu_ring_write(ring, upper_32_bits(addr));
 310	amdgpu_ring_write(ring, lower_32_bits(seq));
 311
 312	/* optionally write high bits as well */
 313	if (write64bit) {
 314		addr += 4;
 315		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
 316		amdgpu_ring_write(ring, lower_32_bits(addr));
 317		amdgpu_ring_write(ring, upper_32_bits(addr));
 318		amdgpu_ring_write(ring, upper_32_bits(seq));
 319	}
 320
 321	/* generate an interrupt */
 322	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
 323	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
 324}
 325
 326/**
 327 * sdma_v2_4_gfx_stop - stop the gfx async dma engines
 328 *
 329 * @adev: amdgpu_device pointer
 330 *
 331 * Stop the gfx async dma ring buffers (VI).
 332 */
 333static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
 334{
 335	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
 336	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
 337	u32 rb_cntl, ib_cntl;
 338	int i;
 339
 340	if ((adev->mman.buffer_funcs_ring == sdma0) ||
 341	    (adev->mman.buffer_funcs_ring == sdma1))
 342		amdgpu_ttm_set_buffer_funcs_status(adev, false);
 343
 344	for (i = 0; i < adev->sdma.num_instances; i++) {
 345		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
 346		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
 347		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
 348		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
 349		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
 350		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 351	}
 352	sdma0->ready = false;
 353	sdma1->ready = false;
 354}
 355
 356/**
 357 * sdma_v2_4_rlc_stop - stop the compute async dma engines
 358 *
 359 * @adev: amdgpu_device pointer
 360 *
 361 * Stop the compute async dma queues (VI).
 362 */
 363static void sdma_v2_4_rlc_stop(struct amdgpu_device *adev)
 364{
 365	/* XXX todo */
 366}
 367
 368/**
  369 * sdma_v2_4_enable - halt or unhalt the async dma engines
 370 *
 371 * @adev: amdgpu_device pointer
 372 * @enable: enable/disable the DMA MEs.
 373 *
 374 * Halt or unhalt the async dma engines (VI).
 375 */
 376static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable)
 377{
 378	u32 f32_cntl;
 379	int i;
 380
 381	if (!enable) {
 382		sdma_v2_4_gfx_stop(adev);
 383		sdma_v2_4_rlc_stop(adev);
 384	}
 385
 386	for (i = 0; i < adev->sdma.num_instances; i++) {
 387		f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
 388		if (enable)
 389			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
 390		else
 391			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
 392		WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl);
 393	}
 394}
 395
 396/**
 397 * sdma_v2_4_gfx_resume - setup and start the async dma engines
 398 *
 399 * @adev: amdgpu_device pointer
 400 *
 401 * Set up the gfx DMA ring buffers and enable them (VI).
 402 * Returns 0 for success, error for failure.
 403 */
 404static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
 405{
 406	struct amdgpu_ring *ring;
 407	u32 rb_cntl, ib_cntl;
 408	u32 rb_bufsz;
 409	u32 wb_offset;
 410	int i, j, r;
 411
 412	for (i = 0; i < adev->sdma.num_instances; i++) {
 413		ring = &adev->sdma.instance[i].ring;
 414		wb_offset = (ring->rptr_offs * 4);
 415
 416		mutex_lock(&adev->srbm_mutex);
 417		for (j = 0; j < 16; j++) {
 418			vi_srbm_select(adev, 0, 0, 0, j);
 419			/* SDMA GFX */
 420			WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
 421			WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
 422		}
 423		vi_srbm_select(adev, 0, 0, 0, 0);
 424		mutex_unlock(&adev->srbm_mutex);
 425
 426		WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
 427		       adev->gfx.config.gb_addr_config & 0x70);
 428
 429		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
 430
 431		/* Set ring buffer size in dwords */
 432		rb_bufsz = order_base_2(ring->ring_size / 4);
 433		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
 434		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
 435#ifdef __BIG_ENDIAN
 436		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
 437		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
 438					RPTR_WRITEBACK_SWAP_ENABLE, 1);
 439#endif
 440		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
 441
 442		/* Initialize the ring buffer's read and write pointers */
 443		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
 444		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
 445		WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
 446		WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
 447
 448		/* set the wb address whether it's enabled or not */
 449		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
 450		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
 451		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
 452		       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
 453
 454		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
 455
 456		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
 457		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);
 458
 459		ring->wptr = 0;
 460		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
 461
 462		/* enable DMA RB */
 463		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
 464		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
 465
 466		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
 467		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
 468#ifdef __BIG_ENDIAN
 469		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
 470#endif
 471		/* enable DMA IBs */
 472		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 473
 474		ring->ready = true;
 475	}
 476
 477	sdma_v2_4_enable(adev, true);
 478	for (i = 0; i < adev->sdma.num_instances; i++) {
 479		ring = &adev->sdma.instance[i].ring;
 480		r = amdgpu_ring_test_ring(ring);
 481		if (r) {
 482			ring->ready = false;
 483			return r;
 484		}
 485
 486		if (adev->mman.buffer_funcs_ring == ring)
 487			amdgpu_ttm_set_buffer_funcs_status(adev, true);
 488	}
 489
 490	return 0;
 491}
 492
 493/**
 494 * sdma_v2_4_rlc_resume - setup and start the async dma engines
 495 *
 496 * @adev: amdgpu_device pointer
 497 *
 498 * Set up the compute DMA queues and enable them (VI).
 499 * Returns 0 for success, error for failure.
 500 */
 501static int sdma_v2_4_rlc_resume(struct amdgpu_device *adev)
 502{
 503	/* XXX todo */
 504	return 0;
 505}
 506
 507/**
 508 * sdma_v2_4_load_microcode - load the sDMA ME ucode
 509 *
 510 * @adev: amdgpu_device pointer
 511 *
 512 * Loads the sDMA0/1 ucode.
 513 * Returns 0 for success, -EINVAL if the ucode is not available.
 514 */
 515static int sdma_v2_4_load_microcode(struct amdgpu_device *adev)
 516{
 517	const struct sdma_firmware_header_v1_0 *hdr;
 518	const __le32 *fw_data;
 519	u32 fw_size;
 520	int i, j;
 521
 522	/* halt the MEs */
 523	sdma_v2_4_enable(adev, false);
 524
 525	for (i = 0; i < adev->sdma.num_instances; i++) {
 526		if (!adev->sdma.instance[i].fw)
 527			return -EINVAL;
 528		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
 529		amdgpu_ucode_print_sdma_hdr(&hdr->header);
 530		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 531		fw_data = (const __le32 *)
 532			(adev->sdma.instance[i].fw->data +
 533			 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
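		/* point UCODE_ADDR at the start of the ucode area, then
		 * stream the image one dword at a time through UCODE_DATA */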
 534		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
 535		for (j = 0; j < fw_size; j++)
 536			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
 537		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
 538	}
 539
 540	return 0;
 541}
 542
 543/**
 544 * sdma_v2_4_start - setup and start the async dma engines
 545 *
 546 * @adev: amdgpu_device pointer
 547 *
 548 * Set up the DMA engines and enable them (VI).
 549 * Returns 0 for success, error for failure.
 550 */
 551static int sdma_v2_4_start(struct amdgpu_device *adev)
 552{
 553	int r;
 554
 555
 556	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
 557		r = sdma_v2_4_load_microcode(adev);
 558		if (r)
  559			return r;
 560	}
 561
  562	/* halt the engine before programming */
 563	sdma_v2_4_enable(adev, false);
 564
 565	/* start the gfx rings and rlc compute queues */
 566	r = sdma_v2_4_gfx_resume(adev);
 567	if (r)
 568		return r;
 569	r = sdma_v2_4_rlc_resume(adev);
 570	if (r)
 571		return r;
 572
 573	return 0;
 574}
 575
 576/**
 577 * sdma_v2_4_ring_test_ring - simple async dma engine test
 578 *
 579 * @ring: amdgpu_ring structure holding ring information
 580 *
  581 * Test the DMA engine by using it to write a
  582 * value to memory (VI).
 583 * Returns 0 for success, error for failure.
 584 */
 585static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
 586{
 587	struct amdgpu_device *adev = ring->adev;
 588	unsigned i;
 589	unsigned index;
 590	int r;
 591	u32 tmp;
 592	u64 gpu_addr;
 593
 594	r = amdgpu_device_wb_get(adev, &index);
 595	if (r) {
 596		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
 597		return r;
 598	}
 599
 600	gpu_addr = adev->wb.gpu_addr + (index * 4);
 601	tmp = 0xCAFEDEAD;
 602	adev->wb.wb[index] = cpu_to_le32(tmp);
 603
 604	r = amdgpu_ring_alloc(ring, 5);
 605	if (r) {
 606		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
 607		amdgpu_device_wb_free(adev, index);
 608		return r;
 609	}
 610
 611	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
 612			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
 613	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
 614	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
 615	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
 616	amdgpu_ring_write(ring, 0xDEADBEEF);
 617	amdgpu_ring_commit(ring);
 618
 619	for (i = 0; i < adev->usec_timeout; i++) {
 620		tmp = le32_to_cpu(adev->wb.wb[index]);
 621		if (tmp == 0xDEADBEEF)
 622			break;
 623		DRM_UDELAY(1);
 624	}
 625
 626	if (i < adev->usec_timeout) {
 627		DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
 628	} else {
 629		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
 630			  ring->idx, tmp);
 631		r = -EINVAL;
 632	}
 633	amdgpu_device_wb_free(adev, index);
 634
 635	return r;
 636}
 637
 638/**
 639 * sdma_v2_4_ring_test_ib - test an IB on the DMA engine
 640 *
 641 * @ring: amdgpu_ring structure holding ring information
 642 *
 643 * Test a simple IB in the DMA ring (VI).
 644 * Returns 0 on success, error on failure.
 645 */
 646static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 647{
 648	struct amdgpu_device *adev = ring->adev;
 649	struct amdgpu_ib ib;
  650	struct dma_fence *f = NULL;
  651	unsigned index;
  652	u32 tmp = 0;
 653	u64 gpu_addr;
 654	long r;
 655
 656	r = amdgpu_device_wb_get(adev, &index);
 657	if (r) {
 658		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
 659		return r;
 660	}
 661
 662	gpu_addr = adev->wb.gpu_addr + (index * 4);
 663	tmp = 0xCAFEDEAD;
 664	adev->wb.wb[index] = cpu_to_le32(tmp);
 665	memset(&ib, 0, sizeof(ib));
 666	r = amdgpu_ib_get(adev, NULL, 256, &ib);
 667	if (r) {
 668		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
 669		goto err0;
 670	}
 671
 672	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
 673		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
 674	ib.ptr[1] = lower_32_bits(gpu_addr);
 675	ib.ptr[2] = upper_32_bits(gpu_addr);
 676	ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1);
 677	ib.ptr[4] = 0xDEADBEEF;
 678	ib.ptr[5] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
 679	ib.ptr[6] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
 680	ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
 681	ib.length_dw = 8;
 682
 683	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 684	if (r)
 685		goto err1;
 686
 687	r = dma_fence_wait_timeout(f, false, timeout);
 688	if (r == 0) {
 689		DRM_ERROR("amdgpu: IB test timed out\n");
 690		r = -ETIMEDOUT;
 691		goto err1;
 692	} else if (r < 0) {
 693		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 694		goto err1;
 695	}
 696	tmp = le32_to_cpu(adev->wb.wb[index]);
 697	if (tmp == 0xDEADBEEF) {
 698		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
  699		r = 0;
 700	} else {
 701		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
 702		r = -EINVAL;
 703	}
 704
  705err1:
 706	amdgpu_ib_free(adev, &ib, NULL);
 707	dma_fence_put(f);
 708err0:
 709	amdgpu_device_wb_free(adev, index);
 710	return r;
 711}
 712
 713/**
 714 * sdma_v2_4_vm_copy_pte - update PTEs by copying them from the GART
 715 *
 716 * @ib: indirect buffer to fill with commands
 717 * @pe: addr of the page entry
 718 * @src: src addr to copy from
 719 * @count: number of page entries to update
 720 *
 721 * Update PTEs by copying them from the GART using sDMA (CIK).
 722 */
 723static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib,
 724				  uint64_t pe, uint64_t src,
 725				  unsigned count)
 726{
 727	unsigned bytes = count * 8;
 728
 729	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
 730		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
 731	ib->ptr[ib->length_dw++] = bytes;
 732	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
 733	ib->ptr[ib->length_dw++] = lower_32_bits(src);
 734	ib->ptr[ib->length_dw++] = upper_32_bits(src);
 735	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
  736	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 737}
 738
 739/**
 740 * sdma_v2_4_vm_write_pte - update PTEs by writing them manually
 741 *
 742 * @ib: indirect buffer to fill with commands
 743 * @pe: addr of the page entry
 744 * @value: dst addr to write into pe
 745 * @count: number of page entries to update
  746 * @incr: increase next addr by incr bytes
  747 *
 748 * Update PTEs by writing them manually using sDMA (CIK).
 749 */
 750static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
 751				   uint64_t value, unsigned count,
 752				   uint32_t incr)
 753{
 754	unsigned ndw = count * 2;
 755
 756	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
 757		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
 758	ib->ptr[ib->length_dw++] = pe;
 759	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 760	ib->ptr[ib->length_dw++] = ndw;
 761	for (; ndw > 0; ndw -= 2) {
 762		ib->ptr[ib->length_dw++] = lower_32_bits(value);
 763		ib->ptr[ib->length_dw++] = upper_32_bits(value);
  764		value += incr;
 765	}
 766}
 767
 768/**
 769 * sdma_v2_4_vm_set_pte_pde - update the page tables using sDMA
 770 *
 771 * @ib: indirect buffer to fill with commands
 772 * @pe: addr of the page entry
 773 * @addr: dst addr to write into pe
 774 * @count: number of page entries to update
 775 * @incr: increase next addr by incr bytes
 776 * @flags: access flags
 777 *
 778 * Update the page tables using sDMA (CIK).
 779 */
  780static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
  781				     uint64_t addr, unsigned count,
 782				     uint32_t incr, uint64_t flags)
 783{
 784	/* for physically contiguous pages (vram) */
 785	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
 786	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
 787	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 788	ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
 789	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
 790	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
 791	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
 792	ib->ptr[ib->length_dw++] = incr; /* increment size */
 793	ib->ptr[ib->length_dw++] = 0;
  794	ib->ptr[ib->length_dw++] = count; /* number of entries */
 795}
 796
 797/**
 798 * sdma_v2_4_ring_pad_ib - pad the IB to the required number of dw
 799 *
 800 * @ib: indirect buffer to fill with padding
  801 * Pad @ib with NOP packets so it ends on an 8-dword boundary (VI).
 802 */
 803static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
 804{
 805	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
 806	u32 pad_count;
 807	int i;
 808
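	/* number of NOP dwords needed to reach the next multiple of 8 */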
 809	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
 810	for (i = 0; i < pad_count; i++)
 811		if (sdma && sdma->burst_nop && (i == 0))
 812			ib->ptr[ib->length_dw++] =
 813				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
 814				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
 815		else
 816			ib->ptr[ib->length_dw++] =
 817				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
 818}
 819
 820/**
 821 * sdma_v2_4_ring_emit_pipeline_sync - sync the pipeline
 822 *
 823 * @ring: amdgpu_ring pointer
 824 *
 825 * Make sure all previous operations are completed (CIK).
 826 */
 827static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 828{
 829	uint32_t seq = ring->fence_drv.sync_seq;
 830	uint64_t addr = ring->fence_drv.gpu_addr;
 831
 832	/* wait for idle */
 833	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
 834			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
 835			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
 836			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
 837	amdgpu_ring_write(ring, addr & 0xfffffffc);
 838	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
 839	amdgpu_ring_write(ring, seq); /* reference */
 840	amdgpu_ring_write(ring, 0xffffffff); /* mask */
 841	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
 842			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
 843}
 844
 845/**
  846 * sdma_v2_4_ring_emit_vm_flush - vm flush using sDMA
 847 *
 848 * @ring: amdgpu_ring pointer
 849 * @vm: amdgpu_vm pointer
 850 *
 851 * Update the page table base and flush the VM TLB
 852 * using sDMA (VI).
 853 */
 854static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
 855					 unsigned vmid, uint64_t pd_addr)
 856{
  857	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
 858
 859	/* wait for flush */
 860	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
 861			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
 862			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */
 863	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
 864	amdgpu_ring_write(ring, 0);
 865	amdgpu_ring_write(ring, 0); /* reference */
 866	amdgpu_ring_write(ring, 0); /* mask */
 867	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
 868			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
 869}
 870
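/* write a single register from the ring using an SRBM_WRITE packet (VI) */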
 871static void sdma_v2_4_ring_emit_wreg(struct amdgpu_ring *ring,
 872				     uint32_t reg, uint32_t val)
 873{
 874	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
 875			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
 876	amdgpu_ring_write(ring, reg);
 877	amdgpu_ring_write(ring, val);
 878}
 879
 880static int sdma_v2_4_early_init(void *handle)
 881{
 882	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 883
 884	adev->sdma.num_instances = SDMA_MAX_INSTANCE;
 885
 886	sdma_v2_4_set_ring_funcs(adev);
 887	sdma_v2_4_set_buffer_funcs(adev);
 888	sdma_v2_4_set_vm_pte_funcs(adev);
 889	sdma_v2_4_set_irq_funcs(adev);
 890
 891	return 0;
 892}
 893
 894static int sdma_v2_4_sw_init(void *handle)
 895{
 896	struct amdgpu_ring *ring;
 897	int r, i;
 898	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 899
 900	/* SDMA trap event */
 901	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
 902			      &adev->sdma.trap_irq);
 903	if (r)
 904		return r;
 905
 906	/* SDMA Privileged inst */
 907	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
 908			      &adev->sdma.illegal_inst_irq);
 909	if (r)
 910		return r;
 911
 912	/* SDMA Privileged inst */
 913	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
 914			      &adev->sdma.illegal_inst_irq);
 915	if (r)
 916		return r;
 917
 918	r = sdma_v2_4_init_microcode(adev);
 919	if (r) {
 920		DRM_ERROR("Failed to load sdma firmware!\n");
 921		return r;
 922	}
 923
 924	for (i = 0; i < adev->sdma.num_instances; i++) {
 925		ring = &adev->sdma.instance[i].ring;
 926		ring->ring_obj = NULL;
 927		ring->use_doorbell = false;
 928		sprintf(ring->name, "sdma%d", i);
  929		r = amdgpu_ring_init(adev, ring, 1024,
  930				     &adev->sdma.trap_irq,
 931				     (i == 0) ?
 932				     AMDGPU_SDMA_IRQ_TRAP0 :
 933				     AMDGPU_SDMA_IRQ_TRAP1);
 934		if (r)
 935			return r;
 936	}
 937
 938	return r;
 939}
 940
 941static int sdma_v2_4_sw_fini(void *handle)
 942{
 943	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 944	int i;
 945
 946	for (i = 0; i < adev->sdma.num_instances; i++)
 947		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 948
 949	sdma_v2_4_free_microcode(adev);
 950	return 0;
 951}
 952
 953static int sdma_v2_4_hw_init(void *handle)
 954{
 955	int r;
 956	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 957
 958	sdma_v2_4_init_golden_registers(adev);
 959
 960	r = sdma_v2_4_start(adev);
 961	if (r)
 962		return r;
 963
 964	return r;
 965}
 966
 967static int sdma_v2_4_hw_fini(void *handle)
 968{
 969	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 970
 971	sdma_v2_4_enable(adev, false);
 972
 973	return 0;
 974}
 975
 976static int sdma_v2_4_suspend(void *handle)
 977{
 978	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 979
 980	return sdma_v2_4_hw_fini(adev);
 981}
 982
 983static int sdma_v2_4_resume(void *handle)
 984{
 985	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 986
 987	return sdma_v2_4_hw_init(adev);
 988}
 989
 990static bool sdma_v2_4_is_idle(void *handle)
 991{
 992	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 993	u32 tmp = RREG32(mmSRBM_STATUS2);
 994
 995	if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
 996		   SRBM_STATUS2__SDMA1_BUSY_MASK))
 997	    return false;
 998
 999	return true;
1000}
1001
1002static int sdma_v2_4_wait_for_idle(void *handle)
1003{
1004	unsigned i;
1005	u32 tmp;
1006	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1007
1008	for (i = 0; i < adev->usec_timeout; i++) {
1009		tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
1010				SRBM_STATUS2__SDMA1_BUSY_MASK);
1011
1012		if (!tmp)
1013			return 0;
1014		udelay(1);
1015	}
1016	return -ETIMEDOUT;
1017}
 1018
1019static int sdma_v2_4_soft_reset(void *handle)
1020{
1021	u32 srbm_soft_reset = 0;
1022	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1023	u32 tmp = RREG32(mmSRBM_STATUS2);
1024
1025	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
1026		/* sdma0 */
1027		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
1028		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
1029		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
1030		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
1031	}
1032	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
1033		/* sdma1 */
1034		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
1035		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
1036		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
1037		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
1038	}
1039
 1040	if (srbm_soft_reset) {
1041		tmp = RREG32(mmSRBM_SOFT_RESET);
1042		tmp |= srbm_soft_reset;
1043		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1044		WREG32(mmSRBM_SOFT_RESET, tmp);
1045		tmp = RREG32(mmSRBM_SOFT_RESET);
1046
1047		udelay(50);
1048
1049		tmp &= ~srbm_soft_reset;
1050		WREG32(mmSRBM_SOFT_RESET, tmp);
1051		tmp = RREG32(mmSRBM_SOFT_RESET);
1052
1053		/* Wait a little for things to settle down */
 1054		udelay(50);
1055	}
1056
1057	return 0;
1058}
1059
1060static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev,
1061					struct amdgpu_irq_src *src,
1062					unsigned type,
1063					enum amdgpu_interrupt_state state)
1064{
1065	u32 sdma_cntl;
1066
1067	switch (type) {
1068	case AMDGPU_SDMA_IRQ_TRAP0:
1069		switch (state) {
1070		case AMDGPU_IRQ_STATE_DISABLE:
1071			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
1072			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
1073			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
1074			break;
1075		case AMDGPU_IRQ_STATE_ENABLE:
1076			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
1077			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
1078			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
1079			break;
1080		default:
1081			break;
1082		}
1083		break;
1084	case AMDGPU_SDMA_IRQ_TRAP1:
1085		switch (state) {
1086		case AMDGPU_IRQ_STATE_DISABLE:
1087			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
1088			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
1089			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
1090			break;
1091		case AMDGPU_IRQ_STATE_ENABLE:
1092			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
1093			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
1094			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
1095			break;
1096		default:
1097			break;
1098		}
1099		break;
1100	default:
1101		break;
1102	}
1103	return 0;
1104}
1105
1106static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev,
1107				      struct amdgpu_irq_src *source,
1108				      struct amdgpu_iv_entry *entry)
1109{
1110	u8 instance_id, queue_id;
1111
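	/* ring_id encodes the source: bits [1:0] SDMA instance, bits [3:2] queue */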
1112	instance_id = (entry->ring_id & 0x3) >> 0;
1113	queue_id = (entry->ring_id & 0xc) >> 2;
1114	DRM_DEBUG("IH: SDMA trap\n");
1115	switch (instance_id) {
1116	case 0:
1117		switch (queue_id) {
1118		case 0:
1119			amdgpu_fence_process(&adev->sdma.instance[0].ring);
1120			break;
1121		case 1:
1122			/* XXX compute */
1123			break;
1124		case 2:
1125			/* XXX compute */
1126			break;
1127		}
1128		break;
1129	case 1:
1130		switch (queue_id) {
1131		case 0:
1132			amdgpu_fence_process(&adev->sdma.instance[1].ring);
1133			break;
1134		case 1:
1135			/* XXX compute */
1136			break;
1137		case 2:
1138			/* XXX compute */
1139			break;
1140		}
1141		break;
1142	}
1143	return 0;
1144}
1145
1146static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev,
1147					      struct amdgpu_irq_src *source,
1148					      struct amdgpu_iv_entry *entry)
1149{
1150	DRM_ERROR("Illegal instruction in SDMA command stream\n");
1151	schedule_work(&adev->reset_work);
1152	return 0;
1153}
1154
1155static int sdma_v2_4_set_clockgating_state(void *handle,
1156					  enum amd_clockgating_state state)
1157{
1158	/* XXX handled via the smc on VI */
1159	return 0;
1160}
1161
1162static int sdma_v2_4_set_powergating_state(void *handle,
1163					  enum amd_powergating_state state)
1164{
1165	return 0;
1166}
1167
1168static const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
1169	.name = "sdma_v2_4",
1170	.early_init = sdma_v2_4_early_init,
1171	.late_init = NULL,
1172	.sw_init = sdma_v2_4_sw_init,
1173	.sw_fini = sdma_v2_4_sw_fini,
1174	.hw_init = sdma_v2_4_hw_init,
1175	.hw_fini = sdma_v2_4_hw_fini,
1176	.suspend = sdma_v2_4_suspend,
1177	.resume = sdma_v2_4_resume,
1178	.is_idle = sdma_v2_4_is_idle,
1179	.wait_for_idle = sdma_v2_4_wait_for_idle,
 1180	.soft_reset = sdma_v2_4_soft_reset,
1181	.set_clockgating_state = sdma_v2_4_set_clockgating_state,
1182	.set_powergating_state = sdma_v2_4_set_powergating_state,
1183};
1184
1185static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
1186	.type = AMDGPU_RING_TYPE_SDMA,
1187	.align_mask = 0xf,
1188	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
1189	.support_64bit_ptrs = false,
1190	.get_rptr = sdma_v2_4_ring_get_rptr,
1191	.get_wptr = sdma_v2_4_ring_get_wptr,
1192	.set_wptr = sdma_v2_4_ring_set_wptr,
1193	.emit_frame_size =
1194		6 + /* sdma_v2_4_ring_emit_hdp_flush */
1195		3 + /* hdp invalidate */
1196		6 + /* sdma_v2_4_ring_emit_pipeline_sync */
1197		VI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* sdma_v2_4_ring_emit_vm_flush */
1198		10 + 10 + 10, /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
1199	.emit_ib_size = 7 + 6, /* sdma_v2_4_ring_emit_ib */
1200	.emit_ib = sdma_v2_4_ring_emit_ib,
1201	.emit_fence = sdma_v2_4_ring_emit_fence,
1202	.emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync,
1203	.emit_vm_flush = sdma_v2_4_ring_emit_vm_flush,
 1204	.emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush,
1205	.test_ring = sdma_v2_4_ring_test_ring,
1206	.test_ib = sdma_v2_4_ring_test_ib,
1207	.insert_nop = sdma_v2_4_ring_insert_nop,
1208	.pad_ib = sdma_v2_4_ring_pad_ib,
1209	.emit_wreg = sdma_v2_4_ring_emit_wreg,
1210};
1211
1212static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
1213{
1214	int i;
1215
1216	for (i = 0; i < adev->sdma.num_instances; i++)
1217		adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs;
1218}
1219
1220static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = {
1221	.set = sdma_v2_4_set_trap_irq_state,
1222	.process = sdma_v2_4_process_trap_irq,
1223};
1224
1225static const struct amdgpu_irq_src_funcs sdma_v2_4_illegal_inst_irq_funcs = {
1226	.process = sdma_v2_4_process_illegal_inst_irq,
1227};
1228
1229static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev)
1230{
1231	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
1232	adev->sdma.trap_irq.funcs = &sdma_v2_4_trap_irq_funcs;
1233	adev->sdma.illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs;
1234}
1235
1236/**
1237 * sdma_v2_4_emit_copy_buffer - copy buffer using the sDMA engine
1238 *
 1239 * @ib: indirect buffer to fill with commands
1240 * @src_offset: src GPU address
1241 * @dst_offset: dst GPU address
1242 * @byte_count: number of bytes to xfer
1243 *
1244 * Copy GPU buffers using the DMA engine (VI).
1245 * Used by the amdgpu ttm implementation to move pages if
1246 * registered as the asic copy callback.
1247 */
1248static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib,
1249				       uint64_t src_offset,
1250				       uint64_t dst_offset,
1251				       uint32_t byte_count)
1252{
1253	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
1254		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
1255	ib->ptr[ib->length_dw++] = byte_count;
1256	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1257	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
1258	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
1259	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1260	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1261}
1262
1263/**
1264 * sdma_v2_4_emit_fill_buffer - fill buffer using the sDMA engine
1265 *
 1266 * @ib: indirect buffer to fill with commands
1267 * @src_data: value to write to buffer
1268 * @dst_offset: dst GPU address
1269 * @byte_count: number of bytes to xfer
1270 *
1271 * Fill GPU buffers using the DMA engine (VI).
1272 */
1273static void sdma_v2_4_emit_fill_buffer(struct amdgpu_ib *ib,
1274				       uint32_t src_data,
1275				       uint64_t dst_offset,
1276				       uint32_t byte_count)
1277{
1278	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
1279	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1280	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1281	ib->ptr[ib->length_dw++] = src_data;
1282	ib->ptr[ib->length_dw++] = byte_count;
1283}
1284
1285static const struct amdgpu_buffer_funcs sdma_v2_4_buffer_funcs = {
1286	.copy_max_bytes = 0x1fffff,
1287	.copy_num_dw = 7,
1288	.emit_copy_buffer = sdma_v2_4_emit_copy_buffer,
1289
1290	.fill_max_bytes = 0x1fffff,
1291	.fill_num_dw = 7,
1292	.emit_fill_buffer = sdma_v2_4_emit_fill_buffer,
1293};
1294
1295static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
1296{
1297	if (adev->mman.buffer_funcs == NULL) {
1298		adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
1299		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
1300	}
1301}
1302
1303static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
1304	.copy_pte_num_dw = 7,
1305	.copy_pte = sdma_v2_4_vm_copy_pte,
1306
1307	.write_pte = sdma_v2_4_vm_write_pte,
1308	.set_pte_pde = sdma_v2_4_vm_set_pte_pde,
1309};
1310
1311static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
1312{
1313	unsigned i;
1314
1315	if (adev->vm_manager.vm_pte_funcs == NULL) {
1316		adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
1317		for (i = 0; i < adev->sdma.num_instances; i++)
1318			adev->vm_manager.vm_pte_rings[i] =
1319				&adev->sdma.instance[i].ring;
1320
1321		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
1322	}
1323}
1324
1325const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
1326{
1327	.type = AMD_IP_BLOCK_TYPE_SDMA,
1328	.major = 2,
1329	.minor = 4,
1330	.rev = 0,
1331	.funcs = &sdma_v2_4_ip_funcs,
1332};
v4.6
   1/*
   2 * Copyright 2014 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 * Authors: Alex Deucher
  23 */
  24#include <linux/firmware.h>
  25#include <drm/drmP.h>
  26#include "amdgpu.h"
  27#include "amdgpu_ucode.h"
  28#include "amdgpu_trace.h"
  29#include "vi.h"
  30#include "vid.h"
  31
  32#include "oss/oss_2_4_d.h"
  33#include "oss/oss_2_4_sh_mask.h"
  34
  35#include "gmc/gmc_7_1_d.h"
  36#include "gmc/gmc_7_1_sh_mask.h"
  37
  38#include "gca/gfx_8_0_d.h"
  39#include "gca/gfx_8_0_enum.h"
  40#include "gca/gfx_8_0_sh_mask.h"
  41
  42#include "bif/bif_5_0_d.h"
  43#include "bif/bif_5_0_sh_mask.h"
  44
  45#include "iceland_sdma_pkt_open.h"
  46
  47static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev);
  48static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev);
  49static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev);
  50static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev);
  51
  52MODULE_FIRMWARE("amdgpu/topaz_sdma.bin");
  53MODULE_FIRMWARE("amdgpu/topaz_sdma1.bin");
  54
  55static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
  56{
  57	SDMA0_REGISTER_OFFSET,
  58	SDMA1_REGISTER_OFFSET
  59};
  60
  61static const u32 golden_settings_iceland_a11[] =
  62{
  63	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
  64	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
  65	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
  66	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
  67};
  68
  69static const u32 iceland_mgcg_cgcg_init[] =
  70{
  71	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
  72	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
  73};
  74
  75/*
  76 * sDMA - System DMA
  77 * Starting with CIK, the GPU has new asynchronous
  78 * DMA engines.  These engines are used for compute
  79 * and gfx.  There are two DMA engines (SDMA0, SDMA1)
  80 * and each one supports 1 ring buffer used for gfx
  81 * and 2 queues used for compute.
  82 *
  83 * The programming model is very similar to the CP
  84 * (ring buffer, IBs, etc.), but sDMA has it's own
  85 * packet format that is different from the PM4 format
  86 * used by the CP. sDMA supports copying data, writing
  87 * embedded data, solid fills, and a number of other
  88 * things.  It also has support for tiling/detiling of
  89 * buffers.
  90 */
  91
  92static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
  93{
  94	switch (adev->asic_type) {
  95	case CHIP_TOPAZ:
  96		amdgpu_program_register_sequence(adev,
  97						 iceland_mgcg_cgcg_init,
  98						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
  99		amdgpu_program_register_sequence(adev,
 100						 golden_settings_iceland_a11,
 101						 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
 102		break;
 103	default:
 104		break;
 105	}
 106}
 107
 
 
 
 
 
 
 
 
 
 108/**
 109 * sdma_v2_4_init_microcode - load ucode images from disk
 110 *
 111 * @adev: amdgpu_device pointer
 112 *
 113 * Use the firmware interface to load the ucode images into
 114 * the driver (not loaded into hw).
 115 * Returns 0 on success, error on failure.
 116 */
 117static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
 118{
 119	const char *chip_name;
 120	char fw_name[30];
 121	int err = 0, i;
 122	struct amdgpu_firmware_info *info = NULL;
 123	const struct common_firmware_header *header = NULL;
 124	const struct sdma_firmware_header_v1_0 *hdr;
 125
 126	DRM_DEBUG("\n");
 127
 128	switch (adev->asic_type) {
 129	case CHIP_TOPAZ:
 130		chip_name = "topaz";
 131		break;
 132	default: BUG();
 133	}
 134
 135	for (i = 0; i < adev->sdma.num_instances; i++) {
 136		if (i == 0)
 137			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
 138		else
 139			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
 140		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
 141		if (err)
 142			goto out;
 143		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
 144		if (err)
 145			goto out;
 146		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
 147		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
 148		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
 149		if (adev->sdma.instance[i].feature_version >= 20)
 150			adev->sdma.instance[i].burst_nop = true;
 151
 152		if (adev->firmware.smu_load) {
 153			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
 154			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
 155			info->fw = adev->sdma.instance[i].fw;
 156			header = (const struct common_firmware_header *)info->fw->data;
 157			adev->firmware.fw_size +=
 158				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 159		}
 160	}
 161
 162out:
 163	if (err) {
 164		printk(KERN_ERR
 165		       "sdma_v2_4: Failed to load firmware \"%s\"\n",
 166		       fw_name);
 167		for (i = 0; i < adev->sdma.num_instances; i++) {
 168			release_firmware(adev->sdma.instance[i].fw);
 169			adev->sdma.instance[i].fw = NULL;
 170		}
 171	}
 172	return err;
 173}
 174
 175/**
 176 * sdma_v2_4_ring_get_rptr - get the current read pointer
 177 *
 178 * @ring: amdgpu ring pointer
 179 *
 180 * Get the current rptr from the hardware (VI+).
 181 */
 182static uint32_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
 183{
 184	u32 rptr;
 185
 186	/* XXX check if swapping is necessary on BE */
 187	rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2;
 188
 189	return rptr;
 190}
 191
 192/**
 193 * sdma_v2_4_ring_get_wptr - get the current write pointer
 194 *
 195 * @ring: amdgpu ring pointer
 196 *
 197 * Get the current wptr from the hardware (VI+).
 198 */
 199static uint32_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
 200{
 201	struct amdgpu_device *adev = ring->adev;
 202	int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
 203	u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
 204
 205	return wptr;
 206}
 207
 208/**
 209 * sdma_v2_4_ring_set_wptr - commit the write pointer
 210 *
 211 * @ring: amdgpu ring pointer
 212 *
 213 * Write the wptr back to the hardware (VI+).
 214 */
 215static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
 216{
 217	struct amdgpu_device *adev = ring->adev;
 218	int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
 219
 220	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
 221}
 222
 223static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 224{
 225	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
 226	int i;
 227
 228	for (i = 0; i < count; i++)
 229		if (sdma && sdma->burst_nop && (i == 0))
 230			amdgpu_ring_write(ring, ring->nop |
 231				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
 232		else
 233			amdgpu_ring_write(ring, ring->nop);
 234}
 235
 236/**
 237 * sdma_v2_4_ring_emit_ib - Schedule an IB on the DMA engine
 238 *
 239 * @ring: amdgpu ring pointer
 240 * @ib: IB object to schedule
 241 *
 242 * Schedule an IB in the DMA ring (VI).
 243 */
 244static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
 245				   struct amdgpu_ib *ib)
 
 246{
 247	u32 vmid = ib->vm_id & 0xf;
 248	u32 next_rptr = ring->wptr + 5;
 249
 250	while ((next_rptr & 7) != 2)
 251		next_rptr++;
 252
 253	next_rptr += 6;
 254
 255	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
 256			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
 257	amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc);
 258	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
 259	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
 260	amdgpu_ring_write(ring, next_rptr);
 261
 262	/* IB packet must end on a 8 DW boundary */
 263	sdma_v2_4_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);
 264
 265	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
 266			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
 267	/* base must be 32 byte aligned */
 268	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
 269	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
 270	amdgpu_ring_write(ring, ib->length_dw);
 271	amdgpu_ring_write(ring, 0);
 272	amdgpu_ring_write(ring, 0);
 273
 274}
 275
 276/**
 277 * sdma_v2_4_hdp_flush_ring_emit - emit an hdp flush on the DMA ring
 278 *
 279 * @ring: amdgpu ring pointer
 280 *
 281 * Emit an hdp flush packet on the requested DMA ring.
 282 */
 283static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 284{
 285	u32 ref_and_mask = 0;
 286
 287	if (ring == &ring->adev->sdma.instance[0].ring)
 288		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
 289	else
 290		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
 291
 292	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
 293			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
 294			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
 295	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
 296	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
 297	amdgpu_ring_write(ring, ref_and_mask); /* reference */
 298	amdgpu_ring_write(ring, ref_and_mask); /* mask */
 299	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
 300			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
 301}
 302
 303static void sdma_v2_4_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
 304{
 305	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
 306			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
 307	amdgpu_ring_write(ring, mmHDP_DEBUG0);
 308	amdgpu_ring_write(ring, 1);
 309}
 310/**
 311 * sdma_v2_4_ring_emit_fence - emit a fence on the DMA ring
 312 *
 313 * @ring: amdgpu ring pointer
 314 * @fence: amdgpu fence object
 315 *
 316 * Add a DMA fence packet to the ring to write
 317 * the fence seq number and DMA trap packet to generate
 318 * an interrupt if needed (VI).
 319 */
 320static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
 321				      unsigned flags)
 322{
 323	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
 324	/* write the fence */
 325	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
 326	amdgpu_ring_write(ring, lower_32_bits(addr));
 327	amdgpu_ring_write(ring, upper_32_bits(addr));
 328	amdgpu_ring_write(ring, lower_32_bits(seq));
 329
 330	/* optionally write high bits as well */
 331	if (write64bit) {
 332		addr += 4;
 333		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
 334		amdgpu_ring_write(ring, lower_32_bits(addr));
 335		amdgpu_ring_write(ring, upper_32_bits(addr));
 336		amdgpu_ring_write(ring, upper_32_bits(seq));
 337	}
 338
 339	/* generate an interrupt */
 340	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
 341	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
 342}
 343
 344/**
 345 * sdma_v2_4_gfx_stop - stop the gfx async dma engines
 346 *
 347 * @adev: amdgpu_device pointer
 348 *
 349 * Stop the gfx async dma ring buffers (VI).
 350 */
 351static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
 352{
 353	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
 354	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
 355	u32 rb_cntl, ib_cntl;
 356	int i;
 357
 358	if ((adev->mman.buffer_funcs_ring == sdma0) ||
 359	    (adev->mman.buffer_funcs_ring == sdma1))
 360		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
 361
 362	for (i = 0; i < adev->sdma.num_instances; i++) {
 363		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
 364		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
 365		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
 366		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
 367		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
 368		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 369	}
 370	sdma0->ready = false;
 371	sdma1->ready = false;
 372}
 373
 374/**
 375 * sdma_v2_4_rlc_stop - stop the compute async dma engines
 376 *
 377 * @adev: amdgpu_device pointer
 378 *
 379 * Stop the compute async dma queues (VI).
 380 */
 381static void sdma_v2_4_rlc_stop(struct amdgpu_device *adev)
 382{
 383	/* XXX todo */
 384}
 385
 386/**
  387 * sdma_v2_4_enable - halt or unhalt the async dma engines
 388 *
 389 * @adev: amdgpu_device pointer
 390 * @enable: enable/disable the DMA MEs.
 391 *
 392 * Halt or unhalt the async dma engines (VI).
 393 */
 394static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable)
 395{
 396	u32 f32_cntl;
 397	int i;
 398
  399	if (!enable) {
 400		sdma_v2_4_gfx_stop(adev);
 401		sdma_v2_4_rlc_stop(adev);
 402	}
 403
 404	for (i = 0; i < adev->sdma.num_instances; i++) {
 405		f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
 406		if (enable)
 407			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
 408		else
 409			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
 410		WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl);
 411	}
 412}
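/*
 * Usage sketch (assumed typical call pattern, see the callers below): the
 * halt/unhalt pairing in this file looks like
 *
 *	sdma_v2_4_enable(adev, false);	-- halt the MEs, gfx/rlc queues stop
 *	... load ucode or reprogram the ring registers ...
 *	sdma_v2_4_enable(adev, true);	-- clear HALT, engines run again
 *
 * as done by sdma_v2_4_load_microcode() and sdma_v2_4_start().
 */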
 413
 414/**
 415 * sdma_v2_4_gfx_resume - setup and start the async dma engines
 416 *
 417 * @adev: amdgpu_device pointer
 418 *
 419 * Set up the gfx DMA ring buffers and enable them (VI).
 420 * Returns 0 for success, error for failure.
 421 */
 422static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
 423{
 424	struct amdgpu_ring *ring;
 425	u32 rb_cntl, ib_cntl;
 426	u32 rb_bufsz;
 427	u32 wb_offset;
 428	int i, j, r;
 429
 430	for (i = 0; i < adev->sdma.num_instances; i++) {
 431		ring = &adev->sdma.instance[i].ring;
 432		wb_offset = (ring->rptr_offs * 4);
 433
 434		mutex_lock(&adev->srbm_mutex);
 435		for (j = 0; j < 16; j++) {
 436			vi_srbm_select(adev, 0, 0, 0, j);
 437			/* SDMA GFX */
 438			WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
 439			WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
 440		}
 441		vi_srbm_select(adev, 0, 0, 0, 0);
 442		mutex_unlock(&adev->srbm_mutex);
 443
 444		WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
 445		       adev->gfx.config.gb_addr_config & 0x70);
 446
 447		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
 448
 449		/* Set ring buffer size in dwords */
 450		rb_bufsz = order_base_2(ring->ring_size / 4);
 451		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
 452		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
 453#ifdef __BIG_ENDIAN
 454		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
 455		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
 456					RPTR_WRITEBACK_SWAP_ENABLE, 1);
 457#endif
 458		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
 459
 460		/* Initialize the ring buffer's read and write pointers */
 461		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
 462		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
 463
 464		/* set the wb address whether it's enabled or not */
 465		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
 466		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
 467		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
 468		       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
 469
 470		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
 471
 472		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
 473		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);
 474
 475		ring->wptr = 0;
 476		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
 477
 478		/* enable DMA RB */
 479		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
 480		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
 481
 482		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
 483		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
 484#ifdef __BIG_ENDIAN
 485		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
 486#endif
 487		/* enable DMA IBs */
 488		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 489
 490		ring->ready = true;
  491
 492		r = amdgpu_ring_test_ring(ring);
 493		if (r) {
 494			ring->ready = false;
 495			return r;
 496		}
 497
 498		if (adev->mman.buffer_funcs_ring == ring)
 499			amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
 500	}
 501
 502	return 0;
 503}
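/*
 * Worked example (sketch, assuming ring->ring_size is in bytes as the
 * "/ 4" above implies): for the 256KB rings created in sdma_v2_4_sw_init(),
 *
 *	rb_bufsz = order_base_2(256 * 1024 / 4) = order_base_2(65536) = 16
 *
 * The ring base is programmed as gpu_addr >> 8 (256-byte aligned), with
 * bits 40 and up of the address going into SDMA0_GFX_RB_BASE_HI.
 */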
 504
 505/**
 506 * sdma_v2_4_rlc_resume - setup and start the async dma engines
 507 *
 508 * @adev: amdgpu_device pointer
 509 *
 510 * Set up the compute DMA queues and enable them (VI).
 511 * Returns 0 for success, error for failure.
 512 */
 513static int sdma_v2_4_rlc_resume(struct amdgpu_device *adev)
 514{
 515	/* XXX todo */
 516	return 0;
 517}
 518
 519/**
 520 * sdma_v2_4_load_microcode - load the sDMA ME ucode
 521 *
 522 * @adev: amdgpu_device pointer
 523 *
 524 * Loads the sDMA0/1 ucode.
 525 * Returns 0 for success, -EINVAL if the ucode is not available.
 526 */
 527static int sdma_v2_4_load_microcode(struct amdgpu_device *adev)
 528{
 529	const struct sdma_firmware_header_v1_0 *hdr;
 530	const __le32 *fw_data;
 531	u32 fw_size;
 532	int i, j;
 533
 534	/* halt the MEs */
 535	sdma_v2_4_enable(adev, false);
 536
 537	for (i = 0; i < adev->sdma.num_instances; i++) {
 538		if (!adev->sdma.instance[i].fw)
 539			return -EINVAL;
 540		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
 541		amdgpu_ucode_print_sdma_hdr(&hdr->header);
 542		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 543		fw_data = (const __le32 *)
 544			(adev->sdma.instance[i].fw->data +
 545			 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 546		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
 547		for (j = 0; j < fw_size; j++)
 548			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
 549		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
 550	}
 551
 552	return 0;
 553}
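/*
 * For reference (sketch of the register protocol used above): with the MEs
 * halted, the loop resets SDMA0_UCODE_ADDR to 0, streams
 * ucode_size_bytes / 4 dwords through SDMA0_UCODE_DATA (the address is
 * expected to auto-increment on each data write), and finally writes the
 * firmware version back to SDMA0_UCODE_ADDR.
 */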
 554
 555/**
 556 * sdma_v2_4_start - setup and start the async dma engines
 557 *
 558 * @adev: amdgpu_device pointer
 559 *
 560 * Set up the DMA engines and enable them (VI).
 561 * Returns 0 for success, error for failure.
 562 */
 563static int sdma_v2_4_start(struct amdgpu_device *adev)
 564{
 565	int r;
 566
 567	if (!adev->firmware.smu_load) {
 568		r = sdma_v2_4_load_microcode(adev);
 569		if (r)
 570			return r;
 571	} else {
 572		r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
 573						AMDGPU_UCODE_ID_SDMA0);
 574		if (r)
 575			return -EINVAL;
 576		r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
 577						AMDGPU_UCODE_ID_SDMA1);
 578		if (r)
 579			return -EINVAL;
 580	}
 581
 582	/* unhalt the MEs */
 583	sdma_v2_4_enable(adev, true);
 584
 585	/* start the gfx rings and rlc compute queues */
 586	r = sdma_v2_4_gfx_resume(adev);
 587	if (r)
 588		return r;
 589	r = sdma_v2_4_rlc_resume(adev);
 590	if (r)
 591		return r;
 592
 593	return 0;
 594}
 595
 596/**
 597 * sdma_v2_4_ring_test_ring - simple async dma engine test
 598 *
 599 * @ring: amdgpu_ring structure holding ring information
 600 *
  601 * Test the DMA engine by using it to write a
  602 * value to memory (VI).
 603 * Returns 0 for success, error for failure.
 604 */
 605static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
 606{
 607	struct amdgpu_device *adev = ring->adev;
 608	unsigned i;
 609	unsigned index;
 610	int r;
 611	u32 tmp;
 612	u64 gpu_addr;
 613
 614	r = amdgpu_wb_get(adev, &index);
 615	if (r) {
 616		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
 617		return r;
 618	}
 619
 620	gpu_addr = adev->wb.gpu_addr + (index * 4);
 621	tmp = 0xCAFEDEAD;
 622	adev->wb.wb[index] = cpu_to_le32(tmp);
 623
 624	r = amdgpu_ring_alloc(ring, 5);
 625	if (r) {
 626		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
 627		amdgpu_wb_free(adev, index);
 628		return r;
 629	}
 630
 631	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
 632			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
 633	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
 634	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
 635	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
 636	amdgpu_ring_write(ring, 0xDEADBEEF);
 637	amdgpu_ring_commit(ring);
 638
 639	for (i = 0; i < adev->usec_timeout; i++) {
 640		tmp = le32_to_cpu(adev->wb.wb[index]);
 641		if (tmp == 0xDEADBEEF)
 642			break;
 643		DRM_UDELAY(1);
 644	}
 645
 646	if (i < adev->usec_timeout) {
 647		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
 648	} else {
 649		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
 650			  ring->idx, tmp);
 651		r = -EINVAL;
 652	}
 653	amdgpu_wb_free(adev, index);
 654
 655	return r;
 656}
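/*
 * Worked example (sketch): the 5-dword test packet above is a single
 * untiled write,
 *
 *	dw0: OP_WRITE, SUBOP_WRITE_LINEAR
 *	dw1: lower_32_bits(gpu_addr)	(writeback slot)
 *	dw2: upper_32_bits(gpu_addr)
 *	dw3: dword count field
 *	dw4: 0xDEADBEEF			(payload)
 *
 * The slot starts out as 0xCAFEDEAD, so reading back 0xDEADBEEF proves the
 * engine fetched and executed the packet.
 */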
 657
 658/**
 659 * sdma_v2_4_ring_test_ib - test an IB on the DMA engine
 660 *
 661 * @ring: amdgpu_ring structure holding ring information
 662 *
 663 * Test a simple IB in the DMA ring (VI).
 664 * Returns 0 on success, error on failure.
 665 */
 666static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
 667{
 668	struct amdgpu_device *adev = ring->adev;
 669	struct amdgpu_ib ib;
 670	struct fence *f = NULL;
 671	unsigned i;
 672	unsigned index;
 673	int r;
 674	u32 tmp = 0;
 675	u64 gpu_addr;
 676
 677	r = amdgpu_wb_get(adev, &index);
 678	if (r) {
 679		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
 680		return r;
 681	}
 682
 683	gpu_addr = adev->wb.gpu_addr + (index * 4);
 684	tmp = 0xCAFEDEAD;
 685	adev->wb.wb[index] = cpu_to_le32(tmp);
 686	memset(&ib, 0, sizeof(ib));
 687	r = amdgpu_ib_get(adev, NULL, 256, &ib);
 688	if (r) {
 689		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
 690		goto err0;
 691	}
 692
 693	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
 694		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
 695	ib.ptr[1] = lower_32_bits(gpu_addr);
 696	ib.ptr[2] = upper_32_bits(gpu_addr);
 697	ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1);
 698	ib.ptr[4] = 0xDEADBEEF;
 699	ib.ptr[5] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
 700	ib.ptr[6] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
 701	ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
 702	ib.length_dw = 8;
 703
 704	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 705	if (r)
 706		goto err1;
 707
 708	r = fence_wait(f, false);
 709	if (r) {
 710		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
 711		goto err1;
 712	}
 713	for (i = 0; i < adev->usec_timeout; i++) {
 714		tmp = le32_to_cpu(adev->wb.wb[index]);
 715		if (tmp == 0xDEADBEEF)
 716			break;
 717		DRM_UDELAY(1);
 718	}
 719	if (i < adev->usec_timeout) {
 720		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
 721			 ring->idx, i);
 722		goto err1;
 723	} else {
 724		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
 725		r = -EINVAL;
 726	}
 727
  728err1:
  729	amdgpu_ib_free(adev, &ib, NULL);
  730	fence_put(f);
 732err0:
 733	amdgpu_wb_free(adev, index);
 734	return r;
 735}
 736
 737/**
 738 * sdma_v2_4_vm_copy_pte - update PTEs by copying them from the GART
 739 *
 740 * @ib: indirect buffer to fill with commands
 741 * @pe: addr of the page entry
 742 * @src: src addr to copy from
 743 * @count: number of page entries to update
 744 *
  745 * Update PTEs by copying them from the GART using sDMA (VI).
 746 */
 747static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib,
 748				  uint64_t pe, uint64_t src,
 749				  unsigned count)
 750{
 751	while (count) {
 752		unsigned bytes = count * 8;
 753		if (bytes > 0x1FFFF8)
 754			bytes = 0x1FFFF8;
 755
 756		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
 757			SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
 758		ib->ptr[ib->length_dw++] = bytes;
 759		ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
 760		ib->ptr[ib->length_dw++] = lower_32_bits(src);
 761		ib->ptr[ib->length_dw++] = upper_32_bits(src);
 762		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
 763		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 764
 765		pe += bytes;
 766		src += bytes;
 767		count -= bytes / 8;
 768	}
 769}
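/*
 * Worked example: each COPY_LINEAR packet above is 7 dwords and moves at
 * most 0x1FFFF8 bytes, i.e. 0x1FFFF8 / 8 = 262143 page table entries.
 * Updating 300000 PTEs would therefore emit two packets: one covering
 * 262143 entries and one covering the remaining 37857.
 */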
 770
 771/**
 772 * sdma_v2_4_vm_write_pte - update PTEs by writing them manually
 773 *
 774 * @ib: indirect buffer to fill with commands
 775 * @pe: addr of the page entry
 776 * @addr: dst addr to write into pe
 777 * @count: number of page entries to update
 778 * @incr: increase next addr by incr bytes
 779 * @flags: access flags
 780 *
  781 * Update PTEs by writing them manually using sDMA (VI).
 782 */
 783static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib,
 784				   const dma_addr_t *pages_addr, uint64_t pe,
 785				   uint64_t addr, unsigned count,
 786				   uint32_t incr, uint32_t flags)
 787{
 788	uint64_t value;
 789	unsigned ndw;
 790
 791	while (count) {
 792		ndw = count * 2;
 793		if (ndw > 0xFFFFE)
 794			ndw = 0xFFFFE;
 795
 796		/* for non-physically contiguous pages (system) */
 797		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
  798			SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
  799		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
 800		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 801		ib->ptr[ib->length_dw++] = ndw;
 802		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
 803			value = amdgpu_vm_map_gart(pages_addr, addr);
 804			addr += incr;
 805			value |= flags;
 806			ib->ptr[ib->length_dw++] = value;
 807			ib->ptr[ib->length_dw++] = upper_32_bits(value);
 808		}
 809	}
 810}
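/*
 * Worked example: the WRITE packet above carries at most ndw = 0xFFFFE data
 * dwords after its 4-dword header (opcode, dst lo, dst hi, count), i.e. up
 * to 0x7FFFF two-dword PTEs per packet.  Each entry is the GART translation
 * of @addr OR'ed with @flags, with @addr advancing by @incr per entry.
 */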
 811
 812/**
 813 * sdma_v2_4_vm_set_pte_pde - update the page tables using sDMA
 814 *
 815 * @ib: indirect buffer to fill with commands
 816 * @pe: addr of the page entry
 817 * @addr: dst addr to write into pe
 818 * @count: number of page entries to update
 819 * @incr: increase next addr by incr bytes
 820 * @flags: access flags
 821 *
  822 * Update the page tables using sDMA (VI).
 823 */
 824static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib,
 825				     uint64_t pe,
 826				     uint64_t addr, unsigned count,
 827				     uint32_t incr, uint32_t flags)
 828{
 829	uint64_t value;
 830	unsigned ndw;
 831
 832	while (count) {
 833		ndw = count;
 834		if (ndw > 0x7FFFF)
 835			ndw = 0x7FFFF;
 836
 837		if (flags & AMDGPU_PTE_VALID)
 838			value = addr;
 839		else
 840			value = 0;
 841
 842		/* for physically contiguous pages (vram) */
 843		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
  844		ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
 845		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 846		ib->ptr[ib->length_dw++] = flags; /* mask */
 847		ib->ptr[ib->length_dw++] = 0;
  848		ib->ptr[ib->length_dw++] = lower_32_bits(value); /* value */
 849		ib->ptr[ib->length_dw++] = upper_32_bits(value);
 850		ib->ptr[ib->length_dw++] = incr; /* increment size */
 851		ib->ptr[ib->length_dw++] = 0;
 852		ib->ptr[ib->length_dw++] = ndw; /* number of entries */
 853
 854		pe += ndw * 8;
 855		addr += ndw * incr;
 856		count -= ndw;
 857	}
 858}
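/*
 * Worked example: generate_pte_pde is a fixed 10-dword packet that covers
 * up to 0x7FFFF entries; the engine starts from "value" and adds "incr"
 * for each successive entry.  A call with count = 0x80000 therefore splits
 * into one packet for 0x7FFFF entries plus a second for the last entry.
 */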
 859
 860/**
 861 * sdma_v2_4_ring_pad_ib - pad the IB to the required number of dw
 862 *
  863 * @ring: amdgpu_ring structure holding ring information
  864 * @ib: indirect buffer to fill with padding
 865 */
 866static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
 867{
 868	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
 869	u32 pad_count;
 870	int i;
 871
 872	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
 873	for (i = 0; i < pad_count; i++)
 874		if (sdma && sdma->burst_nop && (i == 0))
 875			ib->ptr[ib->length_dw++] =
 876				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
 877				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
 878		else
 879			ib->ptr[ib->length_dw++] =
 880				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
 881}
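/*
 * Worked example: an IB with length_dw == 5 needs
 * pad_count = (8 - 5) % 8 = 3 padding dwords.  With burst_nop the first
 * pad dword is a NOP header carrying COUNT(2), so the engine skips the two
 * plain NOPs that follow in one shot; either way length_dw ends up at the
 * next multiple of 8.
 */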
 882
 883/**
 884 * sdma_v2_4_ring_emit_pipeline_sync - sync the pipeline
 885 *
 886 * @ring: amdgpu_ring pointer
 887 *
  888 * Make sure all previous operations are completed (VI).
 889 */
 890static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 891{
 892	uint32_t seq = ring->fence_drv.sync_seq;
 893	uint64_t addr = ring->fence_drv.gpu_addr;
 894
 895	/* wait for idle */
 896	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
 897			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
 898			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
 899			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
 900	amdgpu_ring_write(ring, addr & 0xfffffffc);
 901	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
 902	amdgpu_ring_write(ring, seq); /* reference */
 903	amdgpu_ring_write(ring, 0xfffffff); /* mask */
 904	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
 905			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
 906}
 907
 908/**
  909 * sdma_v2_4_ring_emit_vm_flush - vi vm flush using sDMA
 910 *
 911 * @ring: amdgpu_ring pointer
 912 * @vm: amdgpu_vm pointer
 913 *
 914 * Update the page table base and flush the VM TLB
 915 * using sDMA (VI).
 916 */
 917static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
 918					 unsigned vm_id, uint64_t pd_addr)
 919{
 920	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
 921			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
 922	if (vm_id < 8) {
 923		amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
 924	} else {
 925		amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
 926	}
 927	amdgpu_ring_write(ring, pd_addr >> 12);
 928
 929	/* flush TLB */
 930	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
 931			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
 932	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
 933	amdgpu_ring_write(ring, 1 << vm_id);
 934
 935	/* wait for flush */
 936	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
 937			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
 938			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */
 939	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
 940	amdgpu_ring_write(ring, 0);
 941	amdgpu_ring_write(ring, 0); /* reference */
 942	amdgpu_ring_write(ring, 0); /* mask */
 943	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
 944			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
 945}
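/*
 * For reference (sketch): the flush above is done entirely with ring
 * packets -- (1) an SRBM write of pd_addr >> 12 into
 * VM_CONTEXT0..7_PAGE_TABLE_BASE_ADDR or VM_CONTEXT8..15_... depending on
 * vm_id, (2) an SRBM write of (1 << vm_id) to VM_INVALIDATE_REQUEST, and
 * (3) a poll_regmem on VM_INVALIDATE_REQUEST with func=0 ("always"), which
 * appears to act as a read-back/ordering point rather than a real wait.
 */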
 946
 947static int sdma_v2_4_early_init(void *handle)
 948{
 949	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 950
 951	adev->sdma.num_instances = SDMA_MAX_INSTANCE;
 952
 953	sdma_v2_4_set_ring_funcs(adev);
 954	sdma_v2_4_set_buffer_funcs(adev);
 955	sdma_v2_4_set_vm_pte_funcs(adev);
 956	sdma_v2_4_set_irq_funcs(adev);
 957
 958	return 0;
 959}
 960
 961static int sdma_v2_4_sw_init(void *handle)
 962{
 963	struct amdgpu_ring *ring;
 964	int r, i;
 965	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 966
 967	/* SDMA trap event */
 968	r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
 969	if (r)
 970		return r;
 971
 972	/* SDMA Privileged inst */
 973	r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
 974	if (r)
 975		return r;
 976
 977	/* SDMA Privileged inst */
 978	r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
 979	if (r)
 980		return r;
 981
 982	r = sdma_v2_4_init_microcode(adev);
 983	if (r) {
 984		DRM_ERROR("Failed to load sdma firmware!\n");
 985		return r;
 986	}
 987
 988	for (i = 0; i < adev->sdma.num_instances; i++) {
 989		ring = &adev->sdma.instance[i].ring;
 990		ring->ring_obj = NULL;
 991		ring->use_doorbell = false;
 992		sprintf(ring->name, "sdma%d", i);
 993		r = amdgpu_ring_init(adev, ring, 256 * 1024,
 994				     SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
 995				     &adev->sdma.trap_irq,
 996				     (i == 0) ?
 997				     AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
 998				     AMDGPU_RING_TYPE_SDMA);
 999		if (r)
1000			return r;
1001	}
1002
1003	return r;
1004}
1005
1006static int sdma_v2_4_sw_fini(void *handle)
1007{
1008	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1009	int i;
1010
1011	for (i = 0; i < adev->sdma.num_instances; i++)
1012		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
1013
1014	return 0;
1015}
1016
1017static int sdma_v2_4_hw_init(void *handle)
1018{
1019	int r;
1020	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1021
1022	sdma_v2_4_init_golden_registers(adev);
1023
1024	r = sdma_v2_4_start(adev);
1025	if (r)
1026		return r;
1027
1028	return r;
1029}
1030
1031static int sdma_v2_4_hw_fini(void *handle)
1032{
1033	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1034
1035	sdma_v2_4_enable(adev, false);
1036
1037	return 0;
1038}
1039
1040static int sdma_v2_4_suspend(void *handle)
1041{
1042	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1043
1044	return sdma_v2_4_hw_fini(adev);
1045}
1046
1047static int sdma_v2_4_resume(void *handle)
1048{
1049	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1050
1051	return sdma_v2_4_hw_init(adev);
1052}
1053
1054static bool sdma_v2_4_is_idle(void *handle)
1055{
1056	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1057	u32 tmp = RREG32(mmSRBM_STATUS2);
1058
1059	if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
1060		   SRBM_STATUS2__SDMA1_BUSY_MASK))
1061	    return false;
1062
1063	return true;
1064}
1065
1066static int sdma_v2_4_wait_for_idle(void *handle)
1067{
1068	unsigned i;
1069	u32 tmp;
1070	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1071
1072	for (i = 0; i < adev->usec_timeout; i++) {
1073		tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
1074				SRBM_STATUS2__SDMA1_BUSY_MASK);
1075
1076		if (!tmp)
1077			return 0;
1078		udelay(1);
1079	}
1080	return -ETIMEDOUT;
1081}
1082
1083static void sdma_v2_4_print_status(void *handle)
1084{
1085	int i, j;
1086	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1087
1088	dev_info(adev->dev, "VI SDMA registers\n");
1089	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
1090		 RREG32(mmSRBM_STATUS2));
1091	for (i = 0; i < adev->sdma.num_instances; i++) {
1092		dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
1093			 i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
1094		dev_info(adev->dev, "  SDMA%d_F32_CNTL=0x%08X\n",
1095			 i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]));
1096		dev_info(adev->dev, "  SDMA%d_CNTL=0x%08X\n",
1097			 i, RREG32(mmSDMA0_CNTL + sdma_offsets[i]));
1098		dev_info(adev->dev, "  SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n",
1099			 i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i]));
1100		dev_info(adev->dev, "  SDMA%d_GFX_IB_CNTL=0x%08X\n",
1101			 i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]));
1102		dev_info(adev->dev, "  SDMA%d_GFX_RB_CNTL=0x%08X\n",
1103			 i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]));
1104		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR=0x%08X\n",
1105			 i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i]));
1106		dev_info(adev->dev, "  SDMA%d_GFX_RB_WPTR=0x%08X\n",
1107			 i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i]));
1108		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n",
1109			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i]));
1110		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n",
1111			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i]));
1112		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE=0x%08X\n",
1113			 i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
1114		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
1115			 i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
1116		dev_info(adev->dev, "  SDMA%d_TILING_CONFIG=0x%08X\n",
1117			 i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i]));
1118		mutex_lock(&adev->srbm_mutex);
1119		for (j = 0; j < 16; j++) {
1120			vi_srbm_select(adev, 0, 0, 0, j);
1121			dev_info(adev->dev, "  VM %d:\n", j);
1122			dev_info(adev->dev, "  SDMA%d_GFX_VIRTUAL_ADDR=0x%08X\n",
1123				 i, RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i]));
1124			dev_info(adev->dev, "  SDMA%d_GFX_APE1_CNTL=0x%08X\n",
1125				 i, RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i]));
1126		}
1127		vi_srbm_select(adev, 0, 0, 0, 0);
1128		mutex_unlock(&adev->srbm_mutex);
1129	}
1130}
1131
1132static int sdma_v2_4_soft_reset(void *handle)
1133{
1134	u32 srbm_soft_reset = 0;
1135	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1136	u32 tmp = RREG32(mmSRBM_STATUS2);
1137
1138	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
1139		/* sdma0 */
1140		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
1141		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
1142		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
1143		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
1144	}
1145	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
1146		/* sdma1 */
1147		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
1148		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
1149		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
1150		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
1151	}
1152
1153	if (srbm_soft_reset) {
1154		sdma_v2_4_print_status((void *)adev);
1155
1156		tmp = RREG32(mmSRBM_SOFT_RESET);
1157		tmp |= srbm_soft_reset;
1158		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1159		WREG32(mmSRBM_SOFT_RESET, tmp);
1160		tmp = RREG32(mmSRBM_SOFT_RESET);
1161
1162		udelay(50);
1163
1164		tmp &= ~srbm_soft_reset;
1165		WREG32(mmSRBM_SOFT_RESET, tmp);
1166		tmp = RREG32(mmSRBM_SOFT_RESET);
1167
1168		/* Wait a little for things to settle down */
1169		udelay(50);
1170
1171		sdma_v2_4_print_status((void *)adev);
1172	}
1173
1174	return 0;
1175}
1176
1177static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev,
1178					struct amdgpu_irq_src *src,
1179					unsigned type,
1180					enum amdgpu_interrupt_state state)
1181{
1182	u32 sdma_cntl;
1183
1184	switch (type) {
1185	case AMDGPU_SDMA_IRQ_TRAP0:
1186		switch (state) {
1187		case AMDGPU_IRQ_STATE_DISABLE:
1188			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
1189			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
1190			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
1191			break;
1192		case AMDGPU_IRQ_STATE_ENABLE:
1193			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
1194			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
1195			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
1196			break;
1197		default:
1198			break;
1199		}
1200		break;
1201	case AMDGPU_SDMA_IRQ_TRAP1:
1202		switch (state) {
1203		case AMDGPU_IRQ_STATE_DISABLE:
1204			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
1205			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
1206			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
1207			break;
1208		case AMDGPU_IRQ_STATE_ENABLE:
1209			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
1210			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
1211			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
1212			break;
1213		default:
1214			break;
1215		}
1216		break;
1217	default:
1218		break;
1219	}
1220	return 0;
1221}
1222
1223static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev,
1224				      struct amdgpu_irq_src *source,
1225				      struct amdgpu_iv_entry *entry)
1226{
1227	u8 instance_id, queue_id;
1228
1229	instance_id = (entry->ring_id & 0x3) >> 0;
1230	queue_id = (entry->ring_id & 0xc) >> 2;
1231	DRM_DEBUG("IH: SDMA trap\n");
1232	switch (instance_id) {
1233	case 0:
1234		switch (queue_id) {
1235		case 0:
1236			amdgpu_fence_process(&adev->sdma.instance[0].ring);
1237			break;
1238		case 1:
1239			/* XXX compute */
1240			break;
1241		case 2:
1242			/* XXX compute */
1243			break;
1244		}
1245		break;
1246	case 1:
1247		switch (queue_id) {
1248		case 0:
1249			amdgpu_fence_process(&adev->sdma.instance[1].ring);
1250			break;
1251		case 1:
1252			/* XXX compute */
1253			break;
1254		case 2:
1255			/* XXX compute */
1256			break;
1257		}
1258		break;
1259	}
1260	return 0;
1261}
1262
1263static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev,
1264					      struct amdgpu_irq_src *source,
1265					      struct amdgpu_iv_entry *entry)
1266{
1267	DRM_ERROR("Illegal instruction in SDMA command stream\n");
1268	schedule_work(&adev->reset_work);
1269	return 0;
1270}
1271
1272static int sdma_v2_4_set_clockgating_state(void *handle,
1273					  enum amd_clockgating_state state)
1274{
1275	/* XXX handled via the smc on VI */
1276	return 0;
1277}
1278
1279static int sdma_v2_4_set_powergating_state(void *handle,
1280					  enum amd_powergating_state state)
1281{
1282	return 0;
1283}
1284
1285const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
1286	.early_init = sdma_v2_4_early_init,
1287	.late_init = NULL,
1288	.sw_init = sdma_v2_4_sw_init,
1289	.sw_fini = sdma_v2_4_sw_fini,
1290	.hw_init = sdma_v2_4_hw_init,
1291	.hw_fini = sdma_v2_4_hw_fini,
1292	.suspend = sdma_v2_4_suspend,
1293	.resume = sdma_v2_4_resume,
1294	.is_idle = sdma_v2_4_is_idle,
1295	.wait_for_idle = sdma_v2_4_wait_for_idle,
1296	.soft_reset = sdma_v2_4_soft_reset,
1297	.print_status = sdma_v2_4_print_status,
1298	.set_clockgating_state = sdma_v2_4_set_clockgating_state,
1299	.set_powergating_state = sdma_v2_4_set_powergating_state,
1300};
1301
1302static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
1303	.get_rptr = sdma_v2_4_ring_get_rptr,
1304	.get_wptr = sdma_v2_4_ring_get_wptr,
1305	.set_wptr = sdma_v2_4_ring_set_wptr,
1306	.parse_cs = NULL,
1307	.emit_ib = sdma_v2_4_ring_emit_ib,
1308	.emit_fence = sdma_v2_4_ring_emit_fence,
1309	.emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync,
1310	.emit_vm_flush = sdma_v2_4_ring_emit_vm_flush,
1311	.emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush,
1312	.emit_hdp_invalidate = sdma_v2_4_ring_emit_hdp_invalidate,
1313	.test_ring = sdma_v2_4_ring_test_ring,
1314	.test_ib = sdma_v2_4_ring_test_ib,
1315	.insert_nop = sdma_v2_4_ring_insert_nop,
1316	.pad_ib = sdma_v2_4_ring_pad_ib,
1317};
1318
1319static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
1320{
1321	int i;
1322
1323	for (i = 0; i < adev->sdma.num_instances; i++)
1324		adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs;
1325}
1326
1327static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = {
1328	.set = sdma_v2_4_set_trap_irq_state,
1329	.process = sdma_v2_4_process_trap_irq,
1330};
1331
1332static const struct amdgpu_irq_src_funcs sdma_v2_4_illegal_inst_irq_funcs = {
1333	.process = sdma_v2_4_process_illegal_inst_irq,
1334};
1335
1336static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev)
1337{
1338	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
1339	adev->sdma.trap_irq.funcs = &sdma_v2_4_trap_irq_funcs;
1340	adev->sdma.illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs;
1341}
1342
1343/**
1344 * sdma_v2_4_emit_copy_buffer - copy buffer using the sDMA engine
1345 *
1346 * @ib: indirect buffer to fill with commands
1347 * @src_offset: src GPU address
1348 * @dst_offset: dst GPU address
1349 * @byte_count: number of bytes to xfer
1350 *
1351 * Copy GPU buffers using the DMA engine (VI).
1352 * Used by the amdgpu ttm implementation to move pages if
1353 * registered as the asic copy callback.
1354 */
1355static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib,
1356				       uint64_t src_offset,
1357				       uint64_t dst_offset,
1358				       uint32_t byte_count)
1359{
1360	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
1361		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
1362	ib->ptr[ib->length_dw++] = byte_count;
1363	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1364	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
1365	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
1366	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1367	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1368}
1369
1370/**
1371 * sdma_v2_4_emit_fill_buffer - fill buffer using the sDMA engine
1372 *
1373 * @ib: indirect buffer to fill with commands
1374 * @src_data: value to write to buffer
1375 * @dst_offset: dst GPU address
1376 * @byte_count: number of bytes to xfer
1377 *
1378 * Fill GPU buffers using the DMA engine (VI).
1379 */
1380static void sdma_v2_4_emit_fill_buffer(struct amdgpu_ib *ib,
1381				       uint32_t src_data,
1382				       uint64_t dst_offset,
1383				       uint32_t byte_count)
1384{
1385	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
1386	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1387	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1388	ib->ptr[ib->length_dw++] = src_data;
1389	ib->ptr[ib->length_dw++] = byte_count;
1390}
1391
1392static const struct amdgpu_buffer_funcs sdma_v2_4_buffer_funcs = {
1393	.copy_max_bytes = 0x1fffff,
1394	.copy_num_dw = 7,
1395	.emit_copy_buffer = sdma_v2_4_emit_copy_buffer,
1396
1397	.fill_max_bytes = 0x1fffff,
1398	.fill_num_dw = 7,
1399	.emit_fill_buffer = sdma_v2_4_emit_fill_buffer,
1400};
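/*
 * Cross-check (for reference): copy_num_dw = 7 matches the seven dwords
 * emitted by sdma_v2_4_emit_copy_buffer(), and copy_max_bytes = 0x1fffff is
 * the same byte-count limit that sdma_v2_4_vm_copy_pte() rounds down to
 * 0x1FFFF8 (a multiple of 8) for PTE copies.
 */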
1401
1402static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
1403{
1404	if (adev->mman.buffer_funcs == NULL) {
1405		adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
1406		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
1407	}
1408}
1409
1410static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
1411	.copy_pte = sdma_v2_4_vm_copy_pte,
1412	.write_pte = sdma_v2_4_vm_write_pte,
1413	.set_pte_pde = sdma_v2_4_vm_set_pte_pde,
1414};
1415
1416static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
1417{
1418	unsigned i;
1419
1420	if (adev->vm_manager.vm_pte_funcs == NULL) {
1421		adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
1422		for (i = 0; i < adev->sdma.num_instances; i++)
1423			adev->vm_manager.vm_pte_rings[i] =
1424				&adev->sdma.instance[i].ring;
1425
1426		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
1427	}
1428}