/* drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c (Linux v6.13.7) */
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "vi.h"
#include "vid.h"

#include "oss/oss_2_4_d.h"
#include "oss/oss_2_4_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "iceland_sdma_pkt_open.h"

#include "ivsrcid/ivsrcid_vislands30.h"

static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("amdgpu/topaz_sdma.bin");
MODULE_FIRMWARE("amdgpu/topaz_sdma1.bin");

static const u32 sdma_offsets[SDMA_MAX_INSTANCE] = {
	SDMA0_REGISTER_OFFSET,
	SDMA1_REGISTER_OFFSET
};

static const u32 golden_settings_iceland_a11[] = {
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
};

static const u32 iceland_mgcg_cgcg_init[] = {
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines.  These engines are used for compute
 * and gfx.  There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP. sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things.  It also has support for tiling/detiling of
 * buffers.
 */
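
/*
 * For illustration, the smallest useful packet in this file is the 5-dword
 * WRITE_LINEAR packet that sdma_v2_4_ring_test_ring() below emits to store
 * a single dword at a GPU address:
 *
 *   dw0: SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
 *        SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)
 *   dw1: lower_32_bits(dst_gpu_addr)
 *   dw2: upper_32_bits(dst_gpu_addr)
 *   dw3: SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)   (dword count)
 *   dw4: the data to write
 *
 * All packets follow this shape: an opcode (plus optional sub-opcode) in
 * dw0, followed by packet-specific dwords.
 */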

static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_iceland_a11,
							ARRAY_SIZE(golden_settings_iceland_a11));
		break;
	default:
		break;
	}
}

static void sdma_v2_4_free_microcode(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}

/**
 * sdma_v2_4_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	int err = 0, i;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct sdma_firmware_header_v1_0 *hdr;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		chip_name = "topaz";
		break;
	default:
		BUG();
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (i == 0)
			err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
						   "amdgpu/%s_sdma.bin", chip_name);
		else
			err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
						   "amdgpu/%s_sdma1.bin", chip_name);
		if (err)
			goto out;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
		if (adev->sdma.instance[i].feature_version >= 20)
			adev->sdma.instance[i].burst_nop = true;

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
			info->fw = adev->sdma.instance[i].fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
		}
	}

out:
	if (err) {
		pr_err("sdma_v2_4: Failed to load firmware \"%s_sdma%s.bin\"\n",
		       chip_name, i == 0 ? "" : "1");
		for (i = 0; i < adev->sdma.num_instances; i++)
			amdgpu_ucode_release(&adev->sdma.instance[i].fw);
	}
	return err;
}

/**
 * sdma_v2_4_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (VI+).
 */
static uint64_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
{
	/* XXX check if swapping is necessary on BE */
	return *ring->rptr_cpu_addr >> 2;
}

/**
 * sdma_v2_4_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VI+).
 */
static uint64_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2;

	return wptr;
}

/**
 * sdma_v2_4_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VI+).
 */
static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], ring->wptr << 2);
}

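/**
 * sdma_v2_4_ring_insert_nop - insert NOP packets on the ring
 *
 * @ring: amdgpu ring pointer
 * @count: number of NOP dwords to insert
 *
 * If the microcode supports it (feature_version >= 20, see
 * sdma_v2_4_init_microcode()), the first dword is a burst NOP header
 * whose COUNT field covers the remaining count - 1 padding dwords;
 * otherwise count individual NOP packets are emitted.
 */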
static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
	int i;

	for (i = 0; i < count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			amdgpu_ring_write(ring, ring->funcs->nop |
				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->funcs->nop);
}

/**
 * sdma_v2_4_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @job: job to retrieve vmid from
 * @ib: IB object to schedule
 * @flags: unused
 *
 * Schedule an IB in the DMA ring (VI).
 */
static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
				   struct amdgpu_job *job,
				   struct amdgpu_ib *ib,
				   uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	/* An IB packet must end on an 8 DW boundary; the INDIRECT packet
	 * below is 6 DWs, so pad the ring until wptr == 2 (mod 8).
	 */
	sdma_v2_4_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
	/* base must be 32 byte aligned */
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
}

/**
 * sdma_v2_4_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 ref_and_mask = 0;

	if (ring->me == 0)
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
	else
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);

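	/* POLL_REGMEM packet: with HDP_FLUSH set, the two dwords after the
	 * header are the "done" and "req" register offsets; reference, mask,
	 * and retry count/poll interval follow (the same trailer as the
	 * plain polls in emit_pipeline_sync/emit_vm_flush below).
	 */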
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
	amdgpu_ring_write(ring, ref_and_mask); /* reference */
	amdgpu_ring_write(ring, ref_and_mask); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

/**
 * sdma_v2_4_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (VI).
 */
static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				      unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* generate an interrupt */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}

/**
 * sdma_v2_4_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (VI).
 */
static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
{
	u32 rb_cntl, ib_cntl;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
	}
}

/**
 * sdma_v2_4_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (VI).
 */
static void sdma_v2_4_rlc_stop(struct amdgpu_device *adev)
{
	/* XXX todo */
}

/**
 * sdma_v2_4_enable - halt or unhalt the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (VI).
 */
static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl;
	int i;

	if (!enable) {
		sdma_v2_4_gfx_stop(adev);
		sdma_v2_4_rlc_stop(adev);
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
		if (enable)
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
		else
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
		WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl);
	}
}

/**
 * sdma_v2_4_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	int i, j, r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;

		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			vi_srbm_select(adev, 0, 0, 0, j);
			/* SDMA GFX */
			WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
			WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
		}
		vi_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
		       adev->gfx.config.gb_addr_config & 0x70);

		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
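		/* ring->ring_size is in bytes; the RB_SIZE field takes the
		 * log2 of the size in dwords, hence the divide by 4.
		 */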
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
					RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);

		/* set the wb address whether it's enabled or not */
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
		       upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
		       lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);

		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);

		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);

		ring->wptr = 0;
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);

		/* enable DMA RB */
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
		/* enable DMA IBs */
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
	}

	sdma_v2_4_enable(adev, true);
	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	return 0;
}

/**
 * sdma_v2_4_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v2_4_rlc_resume(struct amdgpu_device *adev)
{
	/* XXX todo */
	return 0;
}

/**
 * sdma_v2_4_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v2_4_start(struct amdgpu_device *adev)
{
	int r;

	/* halt the engine before programming */
	sdma_v2_4_enable(adev, false);

	/* start the gfx rings and rlc compute queues */
	r = sdma_v2_4_gfx_resume(adev);
	if (r)
		return r;
	r = sdma_v2_4_rlc_resume(adev);
	if (r)
		return r;

	return 0;
}

/**
 * sdma_v2_4_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 5);
	if (r)
		goto error_free_wb;

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

error_free_wb:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/**
 * sdma_v2_4_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test a simple IB in the DMA ring (VI).
 * Returns 0 on success, error on failure.
 */
static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	u32 tmp = 0;
	u64 gpu_addr;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256,
					AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		goto err0;

	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1);
	ib.ptr[4] = 0xDEADBEEF;
	ib.ptr[5] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[6] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
	ib.length_dw = 8;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		goto err1;
	}
	tmp = le32_to_cpu(adev->wb.wb[index]);
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/**
 * sdma_v2_4_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (CIK).
 */
static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib,
				  uint64_t pe, uint64_t src,
				  unsigned count)
{
	unsigned bytes = count * 8;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = bytes;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(src);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}

/**
 * sdma_v2_4_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: value to write into the page entries
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA (CIK).
 */
static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				   uint64_t value, unsigned count,
				   uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib->ptr[ib->length_dw++] = pe;
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = ndw;
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}

/**
 * sdma_v2_4_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (CIK).
 */
static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint64_t flags)
{
	/* for physically contiguous pages (vram) */
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = incr; /* increment size */
	ib->ptr[ib->length_dw++] = 0;
	ib->ptr[ib->length_dw++] = count; /* number of entries */
}

/**
 * sdma_v2_4_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs so its size is a multiple of 8 dwords.
 */
static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
	u32 pad_count;
	int i;

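	/* NOP dwords needed to reach the next multiple of 8 */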
	pad_count = (-ib->length_dw) & 7;
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}

/**
 * sdma_v2_4_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (CIK).
 */
static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq); /* reference */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}

/**
 * sdma_v2_4_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (VI).
 */
static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for flush */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0); /* reference */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

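/**
 * sdma_v2_4_ring_emit_wreg - emit a register write packet
 *
 * @ring: amdgpu ring pointer
 * @reg: register offset to program
 * @val: value to write
 *
 * Write a register over the ring using an SRBM_WRITE packet (VI).
 */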
static void sdma_v2_4_ring_emit_wreg(struct amdgpu_ring *ring,
				     uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, val);
}

static int sdma_v2_4_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	adev->sdma.num_instances = SDMA_MAX_INSTANCE;

	r = sdma_v2_4_init_microcode(adev);
	if (r)
		return r;

	sdma_v2_4_set_ring_funcs(adev);
	sdma_v2_4_set_buffer_funcs(adev);
	sdma_v2_4_set_vm_pte_funcs(adev);
	sdma_v2_4_set_irq_funcs(adev);

	return 0;
}

static int sdma_v2_4_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = ip_block->adev;

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241,
			      &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	/* SDMA SRBM write */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
			      &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = false;
		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
				     (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
				     AMDGPU_SDMA_IRQ_INSTANCE1,
				     AMDGPU_RING_PRIO_DEFAULT, NULL);
		if (r)
			return r;
	}

	return r;
}

static int sdma_v2_4_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	sdma_v2_4_free_microcode(adev);
	return 0;
}

static int sdma_v2_4_hw_init(struct amdgpu_ip_block *ip_block)
{
	int r;
	struct amdgpu_device *adev = ip_block->adev;

	sdma_v2_4_init_golden_registers(adev);

	r = sdma_v2_4_start(adev);
	if (r)
		return r;

	return r;
}

static int sdma_v2_4_hw_fini(struct amdgpu_ip_block *ip_block)
{
	sdma_v2_4_enable(ip_block->adev, false);

	return 0;
}

static int sdma_v2_4_suspend(struct amdgpu_ip_block *ip_block)
{
	return sdma_v2_4_hw_fini(ip_block);
}

static int sdma_v2_4_resume(struct amdgpu_ip_block *ip_block)
{
	return sdma_v2_4_hw_init(ip_block);
}

static bool sdma_v2_4_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
		   SRBM_STATUS2__SDMA1_BUSY_MASK))
		return false;

	return true;
}

static int sdma_v2_4_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = ip_block->adev;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
				SRBM_STATUS2__SDMA1_BUSY_MASK);

		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int sdma_v2_4_soft_reset(struct amdgpu_ip_block *ip_block)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = ip_block->adev;
	u32 status = RREG32(mmSRBM_STATUS2);
	u32 tmp;

	if (status & SRBM_STATUS2__SDMA_BUSY_MASK) {
		/* sdma0 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
	}
	if (status & SRBM_STATUS2__SDMA1_BUSY_MASK) {
		/* sdma1 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
	}

	if (srbm_soft_reset) {
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_INSTANCE0:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_SDMA_IRQ_INSTANCE1:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}

static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u8 instance_id, queue_id;

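	/* ring_id encodes the source: bits [1:0] = SDMA instance,
	 * bits [3:2] = queue (0 = gfx ring, 1/2 = compute, unhandled below).
	 */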
	instance_id = (entry->ring_id & 0x3) >> 0;
	queue_id = (entry->ring_id & 0xc) >> 2;
	DRM_DEBUG("IH: SDMA trap\n");
	switch (instance_id) {
	case 0:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[0].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	case 1:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[1].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	}
	return 0;
}

static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	u8 instance_id, queue_id;

	DRM_ERROR("Illegal instruction in SDMA command stream\n");
	instance_id = (entry->ring_id & 0x3) >> 0;
	queue_id = (entry->ring_id & 0xc) >> 2;

	if (instance_id <= 1 && queue_id == 0)
		drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
	return 0;
}

static int sdma_v2_4_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	/* XXX handled via the smc on VI */
	return 0;
}

static int sdma_v2_4_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
	.name = "sdma_v2_4",
	.early_init = sdma_v2_4_early_init,
	.sw_init = sdma_v2_4_sw_init,
	.sw_fini = sdma_v2_4_sw_fini,
	.hw_init = sdma_v2_4_hw_init,
	.hw_fini = sdma_v2_4_hw_fini,
	.suspend = sdma_v2_4_suspend,
	.resume = sdma_v2_4_resume,
	.is_idle = sdma_v2_4_is_idle,
	.wait_for_idle = sdma_v2_4_wait_for_idle,
	.soft_reset = sdma_v2_4_soft_reset,
	.set_clockgating_state = sdma_v2_4_set_clockgating_state,
	.set_powergating_state = sdma_v2_4_set_powergating_state,
};

static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
	.support_64bit_ptrs = false,
	.secure_submission_supported = true,
	.get_rptr = sdma_v2_4_ring_get_rptr,
	.get_wptr = sdma_v2_4_ring_get_wptr,
	.set_wptr = sdma_v2_4_ring_set_wptr,
	.emit_frame_size =
		6 + /* sdma_v2_4_ring_emit_hdp_flush */
		3 + /* hdp invalidate */
		6 + /* sdma_v2_4_ring_emit_pipeline_sync */
		VI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* sdma_v2_4_ring_emit_vm_flush */
		10 + 10 + 10, /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 6, /* sdma_v2_4_ring_emit_ib */
	.emit_ib = sdma_v2_4_ring_emit_ib,
	.emit_fence = sdma_v2_4_ring_emit_fence,
	.emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync,
	.emit_vm_flush = sdma_v2_4_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush,
	.test_ring = sdma_v2_4_ring_test_ring,
	.test_ib = sdma_v2_4_ring_test_ib,
	.insert_nop = sdma_v2_4_ring_insert_nop,
	.pad_ib = sdma_v2_4_ring_pad_ib,
	.emit_wreg = sdma_v2_4_ring_emit_wreg,
};

static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs;
		adev->sdma.instance[i].ring.me = i;
	}
}

static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = {
	.set = sdma_v2_4_set_trap_irq_state,
	.process = sdma_v2_4_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v2_4_illegal_inst_irq_funcs = {
	.process = sdma_v2_4_process_illegal_inst_irq,
};

static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &sdma_v2_4_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs;
}

/**
 * sdma_v2_4_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to copy to
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 * @copy_flags: unused
 *
 * Copy GPU buffers using the DMA engine (VI).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count,
				       uint32_t copy_flags)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = byte_count;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}

/**
 * sdma_v2_4_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (VI).
 */
static void sdma_v2_4_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count;
}

static const struct amdgpu_buffer_funcs sdma_v2_4_buffer_funcs = {
	.copy_max_bytes = 0x1fffff,
	.copy_num_dw = 7,
	.emit_copy_buffer = sdma_v2_4_emit_copy_buffer,

	.fill_max_bytes = 0x1fffff,
	.fill_num_dw = 7,
	.emit_fill_buffer = sdma_v2_4_emit_fill_buffer,
};

static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
{
	adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}

static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
	.copy_pte_num_dw = 7,
	.copy_pte = sdma_v2_4_vm_copy_pte,

	.write_pte = sdma_v2_4_vm_write_pte,
	.set_pte_pde = sdma_v2_4_vm_set_pte_pde,
};

static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	unsigned i;

	adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->vm_manager.vm_pte_scheds[i] =
			&adev->sdma.instance[i].ring.sched;
	}
	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}

const struct amdgpu_ip_block_version sdma_v2_4_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 2,
	.minor = 4,
	.rev = 0,
	.funcs = &sdma_v2_4_ip_funcs,
};
v4.10.11
   1/*
   2 * Copyright 2014 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 * Authors: Alex Deucher
  23 */
 
 
  24#include <linux/firmware.h>
  25#include <drm/drmP.h>
 
  26#include "amdgpu.h"
  27#include "amdgpu_ucode.h"
  28#include "amdgpu_trace.h"
  29#include "vi.h"
  30#include "vid.h"
  31
  32#include "oss/oss_2_4_d.h"
  33#include "oss/oss_2_4_sh_mask.h"
  34
  35#include "gmc/gmc_7_1_d.h"
  36#include "gmc/gmc_7_1_sh_mask.h"
  37
  38#include "gca/gfx_8_0_d.h"
  39#include "gca/gfx_8_0_enum.h"
  40#include "gca/gfx_8_0_sh_mask.h"
  41
  42#include "bif/bif_5_0_d.h"
  43#include "bif/bif_5_0_sh_mask.h"
  44
  45#include "iceland_sdma_pkt_open.h"
  46
 
 
  47static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev);
  48static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev);
  49static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev);
  50static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev);
  51
  52MODULE_FIRMWARE("amdgpu/topaz_sdma.bin");
  53MODULE_FIRMWARE("amdgpu/topaz_sdma1.bin");
  54
  55static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
  56{
  57	SDMA0_REGISTER_OFFSET,
  58	SDMA1_REGISTER_OFFSET
  59};
  60
  61static const u32 golden_settings_iceland_a11[] =
  62{
  63	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
  64	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
  65	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
  66	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
  67};
  68
  69static const u32 iceland_mgcg_cgcg_init[] =
  70{
  71	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
  72	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
  73};
  74
  75/*
  76 * sDMA - System DMA
  77 * Starting with CIK, the GPU has new asynchronous
  78 * DMA engines.  These engines are used for compute
  79 * and gfx.  There are two DMA engines (SDMA0, SDMA1)
  80 * and each one supports 1 ring buffer used for gfx
  81 * and 2 queues used for compute.
  82 *
  83 * The programming model is very similar to the CP
  84 * (ring buffer, IBs, etc.), but sDMA has it's own
  85 * packet format that is different from the PM4 format
  86 * used by the CP. sDMA supports copying data, writing
  87 * embedded data, solid fills, and a number of other
  88 * things.  It also has support for tiling/detiling of
  89 * buffers.
  90 */
  91
  92static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
  93{
  94	switch (adev->asic_type) {
  95	case CHIP_TOPAZ:
  96		amdgpu_program_register_sequence(adev,
  97						 iceland_mgcg_cgcg_init,
  98						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
  99		amdgpu_program_register_sequence(adev,
 100						 golden_settings_iceland_a11,
 101						 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
 102		break;
 103	default:
 104		break;
 105	}
 106}
 107
 108static void sdma_v2_4_free_microcode(struct amdgpu_device *adev)
 109{
 110	int i;
 111	for (i = 0; i < adev->sdma.num_instances; i++) {
 112		release_firmware(adev->sdma.instance[i].fw);
 113		adev->sdma.instance[i].fw = NULL;
 114	}
 115}
 116
 117/**
 118 * sdma_v2_4_init_microcode - load ucode images from disk
 119 *
 120 * @adev: amdgpu_device pointer
 121 *
 122 * Use the firmware interface to load the ucode images into
 123 * the driver (not loaded into hw).
 124 * Returns 0 on success, error on failure.
 125 */
 126static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
 127{
 128	const char *chip_name;
 129	char fw_name[30];
 130	int err = 0, i;
 131	struct amdgpu_firmware_info *info = NULL;
 132	const struct common_firmware_header *header = NULL;
 133	const struct sdma_firmware_header_v1_0 *hdr;
 134
 135	DRM_DEBUG("\n");
 136
 137	switch (adev->asic_type) {
 138	case CHIP_TOPAZ:
 139		chip_name = "topaz";
 140		break;
 141	default: BUG();
 
 142	}
 143
 144	for (i = 0; i < adev->sdma.num_instances; i++) {
 145		if (i == 0)
 146			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
 
 147		else
 148			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
 149		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
 150		if (err)
 151			goto out;
 152		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
 153		if (err)
 154			goto out;
 155		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
 156		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
 157		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
 158		if (adev->sdma.instance[i].feature_version >= 20)
 159			adev->sdma.instance[i].burst_nop = true;
 160
 161		if (adev->firmware.smu_load) {
 162			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
 163			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
 164			info->fw = adev->sdma.instance[i].fw;
 165			header = (const struct common_firmware_header *)info->fw->data;
 166			adev->firmware.fw_size +=
 167				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 168		}
 169	}
 170
 171out:
 172	if (err) {
 173		printk(KERN_ERR
 174		       "sdma_v2_4: Failed to load firmware \"%s\"\n",
 175		       fw_name);
 176		for (i = 0; i < adev->sdma.num_instances; i++) {
 177			release_firmware(adev->sdma.instance[i].fw);
 178			adev->sdma.instance[i].fw = NULL;
 179		}
 180	}
 181	return err;
 182}
 183
 184/**
 185 * sdma_v2_4_ring_get_rptr - get the current read pointer
 186 *
 187 * @ring: amdgpu ring pointer
 188 *
 189 * Get the current rptr from the hardware (VI+).
 190 */
 191static uint32_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
 192{
 193	/* XXX check if swapping is necessary on BE */
 194	return ring->adev->wb.wb[ring->rptr_offs] >> 2;
 195}
 196
 197/**
 198 * sdma_v2_4_ring_get_wptr - get the current write pointer
 199 *
 200 * @ring: amdgpu ring pointer
 201 *
 202 * Get the current wptr from the hardware (VI+).
 203 */
 204static uint32_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
 205{
 206	struct amdgpu_device *adev = ring->adev;
 207	int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
 208	u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
 209
 210	return wptr;
 211}
 212
 213/**
 214 * sdma_v2_4_ring_set_wptr - commit the write pointer
 215 *
 216 * @ring: amdgpu ring pointer
 217 *
 218 * Write the wptr back to the hardware (VI+).
 219 */
 220static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
 221{
 222	struct amdgpu_device *adev = ring->adev;
 223	int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
 224
 225	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
 226}
 227
 228static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 229{
 230	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
 231	int i;
 232
 233	for (i = 0; i < count; i++)
 234		if (sdma && sdma->burst_nop && (i == 0))
 235			amdgpu_ring_write(ring, ring->funcs->nop |
 236				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
 237		else
 238			amdgpu_ring_write(ring, ring->funcs->nop);
 239}
 240
 241/**
 242 * sdma_v2_4_ring_emit_ib - Schedule an IB on the DMA engine
 243 *
 244 * @ring: amdgpu ring pointer
 
 245 * @ib: IB object to schedule
 
 246 *
 247 * Schedule an IB in the DMA ring (VI).
 248 */
 249static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
 
 250				   struct amdgpu_ib *ib,
 251				   unsigned vm_id, bool ctx_switch)
 252{
 253	u32 vmid = vm_id & 0xf;
 254
 255	/* IB packet must end on a 8 DW boundary */
 256	sdma_v2_4_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);
 257
 258	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
 259			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
 260	/* base must be 32 byte aligned */
 261	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
 262	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
 263	amdgpu_ring_write(ring, ib->length_dw);
 264	amdgpu_ring_write(ring, 0);
 265	amdgpu_ring_write(ring, 0);
 266
 267}
 268
 269/**
 270 * sdma_v2_4_hdp_flush_ring_emit - emit an hdp flush on the DMA ring
 271 *
 272 * @ring: amdgpu ring pointer
 273 *
 274 * Emit an hdp flush packet on the requested DMA ring.
 275 */
 276static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 277{
 278	u32 ref_and_mask = 0;
 279
 280	if (ring == &ring->adev->sdma.instance[0].ring)
 281		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
 282	else
 283		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
 284
 285	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
 286			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
 287			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
 288	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
 289	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
 290	amdgpu_ring_write(ring, ref_and_mask); /* reference */
 291	amdgpu_ring_write(ring, ref_and_mask); /* mask */
 292	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
 293			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
 294}
 295
 296static void sdma_v2_4_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
 297{
 298	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
 299			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
 300	amdgpu_ring_write(ring, mmHDP_DEBUG0);
 301	amdgpu_ring_write(ring, 1);
 302}
 303/**
 304 * sdma_v2_4_ring_emit_fence - emit a fence on the DMA ring
 305 *
 306 * @ring: amdgpu ring pointer
 307 * @fence: amdgpu fence object
 
 
 308 *
 309 * Add a DMA fence packet to the ring to write
 310 * the fence seq number and DMA trap packet to generate
 311 * an interrupt if needed (VI).
 312 */
 313static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
 314				      unsigned flags)
 315{
 316	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
 317	/* write the fence */
 318	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
 319	amdgpu_ring_write(ring, lower_32_bits(addr));
 320	amdgpu_ring_write(ring, upper_32_bits(addr));
 321	amdgpu_ring_write(ring, lower_32_bits(seq));
 322
 323	/* optionally write high bits as well */
 324	if (write64bit) {
 325		addr += 4;
 326		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
 327		amdgpu_ring_write(ring, lower_32_bits(addr));
 328		amdgpu_ring_write(ring, upper_32_bits(addr));
 329		amdgpu_ring_write(ring, upper_32_bits(seq));
 330	}
 331
 332	/* generate an interrupt */
 333	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
 334	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
 335}
 336
 337/**
 338 * sdma_v2_4_gfx_stop - stop the gfx async dma engines
 339 *
 340 * @adev: amdgpu_device pointer
 341 *
 342 * Stop the gfx async dma ring buffers (VI).
 343 */
 344static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
 345{
 346	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
 347	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
 348	u32 rb_cntl, ib_cntl;
 349	int i;
 350
 351	if ((adev->mman.buffer_funcs_ring == sdma0) ||
 352	    (adev->mman.buffer_funcs_ring == sdma1))
 353		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
 354
 355	for (i = 0; i < adev->sdma.num_instances; i++) {
 356		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
 357		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
 358		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
 359		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
 360		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
 361		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 362	}
 363	sdma0->ready = false;
 364	sdma1->ready = false;
 365}
 366
 367/**
 368 * sdma_v2_4_rlc_stop - stop the compute async dma engines
 369 *
 370 * @adev: amdgpu_device pointer
 371 *
 372 * Stop the compute async dma queues (VI).
 373 */
 374static void sdma_v2_4_rlc_stop(struct amdgpu_device *adev)
 375{
 376	/* XXX todo */
 377}
 378
 379/**
 380 * sdma_v2_4_enable - stop the async dma engines
 381 *
 382 * @adev: amdgpu_device pointer
 383 * @enable: enable/disable the DMA MEs.
 384 *
 385 * Halt or unhalt the async dma engines (VI).
 386 */
 387static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable)
 388{
 389	u32 f32_cntl;
 390	int i;
 391
 392	if (!enable) {
 393		sdma_v2_4_gfx_stop(adev);
 394		sdma_v2_4_rlc_stop(adev);
 395	}
 396
 397	for (i = 0; i < adev->sdma.num_instances; i++) {
 398		f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
 399		if (enable)
 400			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
 401		else
 402			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
 403		WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl);
 404	}
 405}
 406
 407/**
 408 * sdma_v2_4_gfx_resume - setup and start the async dma engines
 409 *
 410 * @adev: amdgpu_device pointer
 411 *
 412 * Set up the gfx DMA ring buffers and enable them (VI).
 413 * Returns 0 for success, error for failure.
 414 */
 415static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
 416{
 417	struct amdgpu_ring *ring;
 418	u32 rb_cntl, ib_cntl;
 419	u32 rb_bufsz;
 420	u32 wb_offset;
 421	int i, j, r;
 422
 423	for (i = 0; i < adev->sdma.num_instances; i++) {
 424		ring = &adev->sdma.instance[i].ring;
 425		wb_offset = (ring->rptr_offs * 4);
 426
 427		mutex_lock(&adev->srbm_mutex);
 428		for (j = 0; j < 16; j++) {
 429			vi_srbm_select(adev, 0, 0, 0, j);
 430			/* SDMA GFX */
 431			WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
 432			WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
 433		}
 434		vi_srbm_select(adev, 0, 0, 0, 0);
 435		mutex_unlock(&adev->srbm_mutex);
 436
 437		WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
 438		       adev->gfx.config.gb_addr_config & 0x70);
 439
 440		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
 441
 442		/* Set ring buffer size in dwords */
 443		rb_bufsz = order_base_2(ring->ring_size / 4);
 444		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
 445		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
 446#ifdef __BIG_ENDIAN
 447		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
 448		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
 449					RPTR_WRITEBACK_SWAP_ENABLE, 1);
 450#endif
 451		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
 452
 453		/* Initialize the ring buffer's read and write pointers */
 454		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
 455		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
 456		WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
 457		WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
 458
 459		/* set the wb address whether it's enabled or not */
 460		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
 461		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
 462		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
 463		       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
 464
 465		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
 466
 467		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
 468		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);
 469
 470		ring->wptr = 0;
 471		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
 472
 473		/* enable DMA RB */
 474		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
 475		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
 476
 477		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
 478		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
 479#ifdef __BIG_ENDIAN
 480		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
 481#endif
 482		/* enable DMA IBs */
 483		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 484
 485		ring->ready = true;
 486	}
 487
 488	sdma_v2_4_enable(adev, true);
 489	for (i = 0; i < adev->sdma.num_instances; i++) {
 490		ring = &adev->sdma.instance[i].ring;
 491		r = amdgpu_ring_test_ring(ring);
 492		if (r) {
 493			ring->ready = false;
 494			return r;
 495		}
 496
 497		if (adev->mman.buffer_funcs_ring == ring)
 498			amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
 499	}
 500
 501	return 0;
 502}
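/*
 * Worked example for the RB_SIZE programming above (illustrative numbers,
 * not taken from this file): a 64 KiB gfx ring holds 64 * 1024 / 4 = 16384
 * dwords, so rb_bufsz = order_base_2(16384) = 14 is written into the
 * RB_SIZE field and the engine wraps the write pointer at 2^14 dwords.
 */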
 503
 504/**
 505 * sdma_v2_4_rlc_resume - setup and start the async dma engines
 506 *
 507 * @adev: amdgpu_device pointer
 508 *
 509 * Set up the compute DMA queues and enable them (VI).
 510 * Returns 0 for success, error for failure.
 511 */
 512static int sdma_v2_4_rlc_resume(struct amdgpu_device *adev)
 513{
 514	/* XXX todo */
 515	return 0;
 516}
 517
 518/**
 519 * sdma_v2_4_load_microcode - load the sDMA ME ucode
 520 *
 521 * @adev: amdgpu_device pointer
 522 *
 523 * Loads the sDMA0/1 ucode.
 524 * Returns 0 for success, -EINVAL if the ucode is not available.
 525 */
 526static int sdma_v2_4_load_microcode(struct amdgpu_device *adev)
 527{
 528	const struct sdma_firmware_header_v1_0 *hdr;
 529	const __le32 *fw_data;
 530	u32 fw_size;
 531	int i, j;
 532
 533	/* halt the MEs */
 534	sdma_v2_4_enable(adev, false);
 535
 536	for (i = 0; i < adev->sdma.num_instances; i++) {
 537		if (!adev->sdma.instance[i].fw)
 538			return -EINVAL;
 539		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
 540		amdgpu_ucode_print_sdma_hdr(&hdr->header);
 541		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 542		fw_data = (const __le32 *)
 543			(adev->sdma.instance[i].fw->data +
 544			 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 545		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
 546		for (j = 0; j < fw_size; j++)
 547			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
 548		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
 549	}
 550
 551	return 0;
 552}
 553
 554/**
 555 * sdma_v2_4_start - setup and start the async dma engines
 556 *
 557 * @adev: amdgpu_device pointer
 558 *
 559 * Set up the DMA engines and enable them (VI).
 560 * Returns 0 for success, error for failure.
 561 */
 562static int sdma_v2_4_start(struct amdgpu_device *adev)
 563{
 564	int r;
 565
 566	if (!adev->pp_enabled) {
 567		if (!adev->firmware.smu_load) {
 568			r = sdma_v2_4_load_microcode(adev);
 569			if (r)
 570				return r;
 571		} else {
 572			r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
 573							AMDGPU_UCODE_ID_SDMA0);
 574			if (r)
 575				return -EINVAL;
 576			r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
 577							AMDGPU_UCODE_ID_SDMA1);
 578			if (r)
 579				return -EINVAL;
 580		}
 581	}
 582
  583	/* halt the engine before programming */
 584	sdma_v2_4_enable(adev, false);
 585
 586	/* start the gfx rings and rlc compute queues */
 587	r = sdma_v2_4_gfx_resume(adev);
 588	if (r)
 589		return r;
 590	r = sdma_v2_4_rlc_resume(adev);
 591	if (r)
 592		return r;
 593
 594	return 0;
 595}
 596
 597/**
 598 * sdma_v2_4_ring_test_ring - simple async dma engine test
 599 *
 600 * @ring: amdgpu_ring structure holding ring information
 601 *
  602 * Test the DMA engine by using it to write a value
  603 * to memory (VI).
 604 * Returns 0 for success, error for failure.
 605 */
 606static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
 607{
 608	struct amdgpu_device *adev = ring->adev;
 609	unsigned i;
 610	unsigned index;
 611	int r;
 612	u32 tmp;
 613	u64 gpu_addr;
 614
 615	r = amdgpu_wb_get(adev, &index);
 616	if (r) {
 617		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
 618		return r;
 619	}
 620
 621	gpu_addr = adev->wb.gpu_addr + (index * 4);
 622	tmp = 0xCAFEDEAD;
 623	adev->wb.wb[index] = cpu_to_le32(tmp);
 624
 625	r = amdgpu_ring_alloc(ring, 5);
 626	if (r) {
 627		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
 628		amdgpu_wb_free(adev, index);
 629		return r;
 630	}
 631
 632	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
 633			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
 634	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
 635	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
 636	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
 637	amdgpu_ring_write(ring, 0xDEADBEEF);
 638	amdgpu_ring_commit(ring);
 639
 640	for (i = 0; i < adev->usec_timeout; i++) {
 641		tmp = le32_to_cpu(adev->wb.wb[index]);
 642		if (tmp == 0xDEADBEEF)
 643			break;
 644		DRM_UDELAY(1);
 645	}
 646
 647	if (i < adev->usec_timeout) {
 648		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
 649	} else {
 650		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
 651			  ring->idx, tmp);
 652		r = -EINVAL;
 653	}
 654	amdgpu_wb_free(adev, index);
 655
 656	return r;
 657}
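/*
 * The 5-dword test packet emitted above, spelled out (values as used in
 * sdma_v2_4_ring_test_ring; encoding per iceland_sdma_pkt_open.h):
 *
 *   dw0: OP_WRITE | SUBOP_WRITE_LINEAR   packet header
 *   dw1: lower_32_bits(gpu_addr)         destination address, low half
 *   dw2: upper_32_bits(gpu_addr)         destination address, high half
 *   dw3: COUNT(1)                        one dword of payload follows
 *   dw4: 0xDEADBEEF                      payload written to the wb slot
 *
 * The CPU then polls the write-back slot until 0xDEADBEEF reads back.
 */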
 658
 659/**
 660 * sdma_v2_4_ring_test_ib - test an IB on the DMA engine
 661 *
  662 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout in jiffies, or MAX_SCHEDULE_TIMEOUT for an unbounded wait
  663 *
 664 * Test a simple IB in the DMA ring (VI).
 665 * Returns 0 on success, error on failure.
 666 */
 667static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 668{
 669	struct amdgpu_device *adev = ring->adev;
 670	struct amdgpu_ib ib;
 671	struct dma_fence *f = NULL;
 672	unsigned index;
 673	u32 tmp = 0;
 674	u64 gpu_addr;
 675	long r;
 676
 677	r = amdgpu_wb_get(adev, &index);
 678	if (r) {
 679		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
 680		return r;
 681	}
 682
 683	gpu_addr = adev->wb.gpu_addr + (index * 4);
 684	tmp = 0xCAFEDEAD;
 685	adev->wb.wb[index] = cpu_to_le32(tmp);
 686	memset(&ib, 0, sizeof(ib));
 687	r = amdgpu_ib_get(adev, NULL, 256, &ib);
 688	if (r) {
 689		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
 690		goto err0;
 691	}
 692
 693	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
 694		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
 695	ib.ptr[1] = lower_32_bits(gpu_addr);
 696	ib.ptr[2] = upper_32_bits(gpu_addr);
 697	ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1);
 698	ib.ptr[4] = 0xDEADBEEF;
 699	ib.ptr[5] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
 700	ib.ptr[6] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
 701	ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
 702	ib.length_dw = 8;
 703
 704	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
 705	if (r)
 706		goto err1;
 707
 708	r = dma_fence_wait_timeout(f, false, timeout);
 709	if (r == 0) {
 710		DRM_ERROR("amdgpu: IB test timed out\n");
 711		r = -ETIMEDOUT;
 712		goto err1;
 713	} else if (r < 0) {
 714		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 715		goto err1;
 716	}
 717	tmp = le32_to_cpu(adev->wb.wb[index]);
 718	if (tmp == 0xDEADBEEF) {
 719		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
 720		r = 0;
 721	} else {
 722		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
 723		r = -EINVAL;
 724	}
 725
 726err1:
 727	amdgpu_ib_free(adev, &ib, NULL);
 728	dma_fence_put(f);
 729err0:
 730	amdgpu_wb_free(adev, index);
 731	return r;
 732}
 733
 734/**
 735 * sdma_v2_4_vm_copy_pte - update PTEs by copying them from the GART
 736 *
 737 * @ib: indirect buffer to fill with commands
 738 * @pe: addr of the page entry
 739 * @src: src addr to copy from
 740 * @count: number of page entries to update
 741 *
  742 * Update PTEs by copying them from the GART using sDMA (VI).
 743 */
 744static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib,
 745				  uint64_t pe, uint64_t src,
 746				  unsigned count)
 747{
 748	unsigned bytes = count * 8;
 749
 750	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
 751		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
 752	ib->ptr[ib->length_dw++] = bytes;
 753	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
 754	ib->ptr[ib->length_dw++] = lower_32_bits(src);
 755	ib->ptr[ib->length_dw++] = upper_32_bits(src);
 756	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
 757	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 758}
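/*
 * Example (illustrative numbers): updating count = 512 PTEs copies
 * 512 * 8 = 4096 bytes with a single COPY_LINEAR packet, well below the
 * engine's 0x1fffff-byte copy limit declared in sdma_v2_4_buffer_funcs.
 */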
 759
 760/**
 761 * sdma_v2_4_vm_write_pte - update PTEs by writing them manually
 762 *
 763 * @ib: indirect buffer to fill with commands
 764 * @pe: addr of the page entry
  765 * @value: value to write into the page table entries
 766 * @count: number of page entries to update
 767 * @incr: increase next addr by incr bytes
 768 *
  769 * Update PTEs by writing them manually using sDMA (VI).
 770 */
 771static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
 772				   uint64_t value, unsigned count,
 773				   uint32_t incr)
 774{
 775	unsigned ndw = count * 2;
 776
 777	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
 778		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
  779	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
 780	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 781	ib->ptr[ib->length_dw++] = ndw;
 782	for (; ndw > 0; ndw -= 2) {
 783		ib->ptr[ib->length_dw++] = lower_32_bits(value);
 784		ib->ptr[ib->length_dw++] = upper_32_bits(value);
 785		value += incr;
 786	}
 787}
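/*
 * Each PTE is 64 bits, so the payload is two dwords per entry
 * (ndw = count * 2). E.g. count = 3 with incr = 0x1000 emits the dword
 * pairs for value, value + 0x1000 and value + 0x2000 behind the 4-dword
 * WRITE_LINEAR header built above.
 */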
 788
 789/**
 790 * sdma_v2_4_vm_set_pte_pde - update the page tables using sDMA
 791 *
 792 * @ib: indirect buffer to fill with commands
 793 * @pe: addr of the page entry
  794 * @addr: physical address to map into the page table entries
 795 * @count: number of page entries to update
 796 * @incr: increase next addr by incr bytes
 797 * @flags: access flags
 798 *
  799 * Update the page tables using sDMA (VI).
 800 */
 801static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
 802				     uint64_t addr, unsigned count,
 803				     uint32_t incr, uint32_t flags)
 804{
 805	/* for physically contiguous pages (vram) */
 806	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
 807	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
 808	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 809	ib->ptr[ib->length_dw++] = flags; /* mask */
 810	ib->ptr[ib->length_dw++] = 0;
 811	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
 812	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
 813	ib->ptr[ib->length_dw++] = incr; /* increment size */
 814	ib->ptr[ib->length_dw++] = 0;
 815	ib->ptr[ib->length_dw++] = count; /* number of entries */
 816}
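/*
 * Unlike sdma_v2_4_vm_write_pte(), no per-entry payload is sent here: the
 * GEN_PTEPDE packet carries one base value plus flags and the engine
 * itself generates `count` entries, adding `incr` to the address each
 * time. E.g. count = 512 with incr = 0x1000 maps 2 MiB of physically
 * contiguous VRAM with a single 10-dword packet.
 */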
 817
  818/**
  819 * sdma_v2_4_ring_pad_ib - pad the IB to the required number of dw
  820 *
 * @ring: amdgpu_ring pointer
  821 * @ib: indirect buffer to fill with padding
  822 *
 * Pad the IB with NOPs to a multiple of 8 dwords.
  823 */
 824static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
 825{
 826	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
 827	u32 pad_count;
 828	int i;
 829
 830	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
 831	for (i = 0; i < pad_count; i++)
 832		if (sdma && sdma->burst_nop && (i == 0))
 833			ib->ptr[ib->length_dw++] =
 834				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
 835				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
 836		else
 837			ib->ptr[ib->length_dw++] =
 838				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
 839}
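/*
 * Padding example: an IB with length_dw = 5 gets pad_count =
 * (8 - (5 & 0x7)) % 8 = 3 padding dwords. If the instance supports burst
 * NOPs, the first padding dword is a NOP header with COUNT(2) and the
 * engine consumes the following two dwords as its payload; otherwise all
 * three are independent single-dword NOPs.
 */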
 840
 841/**
 842 * sdma_v2_4_ring_emit_pipeline_sync - sync the pipeline
 843 *
 844 * @ring: amdgpu_ring pointer
 845 *
  846 * Make sure all previous operations are completed (VI).
 847 */
 848static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 849{
 850	uint32_t seq = ring->fence_drv.sync_seq;
 851	uint64_t addr = ring->fence_drv.gpu_addr;
 852
 853	/* wait for idle */
 854	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
 855			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
 856			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
 857			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
 858	amdgpu_ring_write(ring, addr & 0xfffffffc);
 859	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
 860	amdgpu_ring_write(ring, seq); /* reference */
  861	amdgpu_ring_write(ring, 0xffffffff); /* mask */
 862	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
 863			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
 864}
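/*
 * The POLL_REGMEM packet above makes the engine itself busy-wait:
 * MEM_POLL(1) selects a memory poll on the fence address, FUNC(3)
 * compares for equality against the reference value (sync_seq), and DW5
 * allows up to 0xfff retries with a poll interval of 4, so the pipeline
 * sync needs no CPU involvement.
 */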
 865
  866/**
  867 * sdma_v2_4_ring_emit_vm_flush - vm flush using sDMA
  868 *
  869 * @ring: amdgpu_ring pointer
  870 * @vm_id: VM ID for the page tables to flush
 * @pd_addr: physical base address of the page directory
  871 *
  872 * Update the page table base and flush the VM TLB
  873 * using sDMA (VI).
  874 */
 875static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
 876					 unsigned vm_id, uint64_t pd_addr)
 877{
 878	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
 879			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
 880	if (vm_id < 8) {
 881		amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
 882	} else {
 883		amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
 884	}
 885	amdgpu_ring_write(ring, pd_addr >> 12);
 886
 887	/* flush TLB */
 888	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
 889			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
 890	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
 891	amdgpu_ring_write(ring, 1 << vm_id);
 892
 893	/* wait for flush */
 894	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
 895			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
 896			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */
 897	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
 898	amdgpu_ring_write(ring, 0);
 899	amdgpu_ring_write(ring, 0); /* reference */
 900	amdgpu_ring_write(ring, 0); /* mask */
 901	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
 902			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
 903}
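/*
 * Flush sequence above, in order: an SRBM_WRITE sets the per-VM page
 * table base (contexts 0-7 and 8-15 live in separate register banks,
 * hence the vm_id < 8 split; pd_addr >> 12 because the register takes a
 * 4 KiB page frame number), a second SRBM_WRITE sets the vm_id's bit in
 * VM_INVALIDATE_REQUEST, and a final POLL_REGMEM with FUNC(0)
 * ("always") simply waits out the invalidation.
 */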
 904
 905static int sdma_v2_4_early_init(void *handle)
 906{
 907	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 908
 909	adev->sdma.num_instances = SDMA_MAX_INSTANCE;
 910
 911	sdma_v2_4_set_ring_funcs(adev);
 912	sdma_v2_4_set_buffer_funcs(adev);
 913	sdma_v2_4_set_vm_pte_funcs(adev);
 914	sdma_v2_4_set_irq_funcs(adev);
 915
 916	return 0;
 917}
 918
 919static int sdma_v2_4_sw_init(void *handle)
 920{
 921	struct amdgpu_ring *ring;
 922	int r, i;
 923	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 924
 925	/* SDMA trap event */
 926	r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
 927	if (r)
 928		return r;
 929
 930	/* SDMA Privileged inst */
 931	r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
 932	if (r)
 933		return r;
 934
 935	/* SDMA Privileged inst */
 936	r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
 937	if (r)
 938		return r;
 939
 940	r = sdma_v2_4_init_microcode(adev);
 941	if (r) {
 942		DRM_ERROR("Failed to load sdma firmware!\n");
 943		return r;
 944	}
 945
 946	for (i = 0; i < adev->sdma.num_instances; i++) {
 947		ring = &adev->sdma.instance[i].ring;
 948		ring->ring_obj = NULL;
 949		ring->use_doorbell = false;
 950		sprintf(ring->name, "sdma%d", i);
 951		r = amdgpu_ring_init(adev, ring, 1024,
 952				     &adev->sdma.trap_irq,
 953				     (i == 0) ?
 954				     AMDGPU_SDMA_IRQ_TRAP0 :
 955				     AMDGPU_SDMA_IRQ_TRAP1);
 956		if (r)
 957			return r;
 958	}
 959
 960	return r;
 961}
 962
 963static int sdma_v2_4_sw_fini(void *handle)
 964{
 965	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 966	int i;
 967
 968	for (i = 0; i < adev->sdma.num_instances; i++)
 969		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 970
 971	sdma_v2_4_free_microcode(adev);
 972	return 0;
 973}
 974
 975static int sdma_v2_4_hw_init(void *handle)
 976{
 977	int r;
 978	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 979
 980	sdma_v2_4_init_golden_registers(adev);
 981
 982	r = sdma_v2_4_start(adev);
 983	if (r)
 984		return r;
 985
 986	return r;
 987}
 988
 989static int sdma_v2_4_hw_fini(void *handle)
 990{
 991	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 992
 993	sdma_v2_4_enable(adev, false);
 994
 995	return 0;
 996}
 997
 998static int sdma_v2_4_suspend(void *handle)
 999{
1000	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1001
1002	return sdma_v2_4_hw_fini(adev);
1003}
1004
1005static int sdma_v2_4_resume(void *handle)
1006{
1007	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1008
1009	return sdma_v2_4_hw_init(adev);
1010}
1011
1012static bool sdma_v2_4_is_idle(void *handle)
1013{
1014	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1015	u32 tmp = RREG32(mmSRBM_STATUS2);
1016
1017	if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
1018		   SRBM_STATUS2__SDMA1_BUSY_MASK))
 1019		return false;
1020
1021	return true;
1022}
1023
1024static int sdma_v2_4_wait_for_idle(void *handle)
1025{
1026	unsigned i;
1027	u32 tmp;
1028	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1029
1030	for (i = 0; i < adev->usec_timeout; i++) {
1031		tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
1032				SRBM_STATUS2__SDMA1_BUSY_MASK);
1033
1034		if (!tmp)
1035			return 0;
1036		udelay(1);
1037	}
1038	return -ETIMEDOUT;
1039}
1040
1041static int sdma_v2_4_soft_reset(void *handle)
1042{
1043	u32 srbm_soft_reset = 0;
1044	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 1045	u32 status = RREG32(mmSRBM_STATUS2);
	u32 tmp;
1046
 1047	if (status & SRBM_STATUS2__SDMA_BUSY_MASK) {
1048		/* sdma0 */
1049		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
1050		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
1051		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
1052		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
1053	}
 1054	if (status & SRBM_STATUS2__SDMA1_BUSY_MASK) {
1055		/* sdma1 */
1056		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
1057		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
1058		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
1059		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
1060	}
1061
1062	if (srbm_soft_reset) {
1063		tmp = RREG32(mmSRBM_SOFT_RESET);
1064		tmp |= srbm_soft_reset;
1065		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1066		WREG32(mmSRBM_SOFT_RESET, tmp);
1067		tmp = RREG32(mmSRBM_SOFT_RESET);
1068
1069		udelay(50);
1070
1071		tmp &= ~srbm_soft_reset;
1072		WREG32(mmSRBM_SOFT_RESET, tmp);
1073		tmp = RREG32(mmSRBM_SOFT_RESET);
1074
1075		/* Wait a little for things to settle down */
1076		udelay(50);
1077	}
1078
1079	return 0;
1080}
1081
1082static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev,
1083					struct amdgpu_irq_src *src,
1084					unsigned type,
1085					enum amdgpu_interrupt_state state)
1086{
1087	u32 sdma_cntl;
1088
1089	switch (type) {
1090	case AMDGPU_SDMA_IRQ_TRAP0:
1091		switch (state) {
1092		case AMDGPU_IRQ_STATE_DISABLE:
1093			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
1094			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
1095			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
1096			break;
1097		case AMDGPU_IRQ_STATE_ENABLE:
1098			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
1099			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
1100			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
1101			break;
1102		default:
1103			break;
1104		}
1105		break;
1106	case AMDGPU_SDMA_IRQ_TRAP1:
1107		switch (state) {
1108		case AMDGPU_IRQ_STATE_DISABLE:
1109			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
1110			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
1111			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
1112			break;
1113		case AMDGPU_IRQ_STATE_ENABLE:
1114			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
1115			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
1116			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
1117			break;
1118		default:
1119			break;
1120		}
1121		break;
1122	default:
1123		break;
1124	}
1125	return 0;
1126}
1127
1128static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev,
1129				      struct amdgpu_irq_src *source,
1130				      struct amdgpu_iv_entry *entry)
1131{
1132	u8 instance_id, queue_id;
1133
1134	instance_id = (entry->ring_id & 0x3) >> 0;
1135	queue_id = (entry->ring_id & 0xc) >> 2;
1136	DRM_DEBUG("IH: SDMA trap\n");
1137	switch (instance_id) {
1138	case 0:
1139		switch (queue_id) {
1140		case 0:
1141			amdgpu_fence_process(&adev->sdma.instance[0].ring);
1142			break;
1143		case 1:
1144			/* XXX compute */
1145			break;
1146		case 2:
1147			/* XXX compute */
1148			break;
1149		}
1150		break;
1151	case 1:
1152		switch (queue_id) {
1153		case 0:
1154			amdgpu_fence_process(&adev->sdma.instance[1].ring);
1155			break;
1156		case 1:
1157			/* XXX compute */
1158			break;
1159		case 2:
1160			/* XXX compute */
1161			break;
1162		}
1163		break;
1164	}
1165	return 0;
1166}
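/*
 * ring_id decoding used above: bits [1:0] select the SDMA instance and
 * bits [3:2] the queue within it, so e.g. ring_id = 0x5 means instance 1,
 * queue 1 -- a compute queue, currently ignored.
 */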
1167
1168static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev,
1169					      struct amdgpu_irq_src *source,
1170					      struct amdgpu_iv_entry *entry)
1171{
1172	DRM_ERROR("Illegal instruction in SDMA command stream\n");
1173	schedule_work(&adev->reset_work);
1174	return 0;
1175}
1176
1177static int sdma_v2_4_set_clockgating_state(void *handle,
1178					  enum amd_clockgating_state state)
1179{
1180	/* XXX handled via the smc on VI */
1181	return 0;
1182}
1183
1184static int sdma_v2_4_set_powergating_state(void *handle,
1185					  enum amd_powergating_state state)
1186{
1187	return 0;
1188}
1189
1190static const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
1191	.name = "sdma_v2_4",
1192	.early_init = sdma_v2_4_early_init,
1193	.late_init = NULL,
1194	.sw_init = sdma_v2_4_sw_init,
1195	.sw_fini = sdma_v2_4_sw_fini,
1196	.hw_init = sdma_v2_4_hw_init,
1197	.hw_fini = sdma_v2_4_hw_fini,
1198	.suspend = sdma_v2_4_suspend,
1199	.resume = sdma_v2_4_resume,
1200	.is_idle = sdma_v2_4_is_idle,
1201	.wait_for_idle = sdma_v2_4_wait_for_idle,
1202	.soft_reset = sdma_v2_4_soft_reset,
1203	.set_clockgating_state = sdma_v2_4_set_clockgating_state,
1204	.set_powergating_state = sdma_v2_4_set_powergating_state,
1205};
1206
1207static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
1208	.type = AMDGPU_RING_TYPE_SDMA,
1209	.align_mask = 0xf,
1210	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
1211	.get_rptr = sdma_v2_4_ring_get_rptr,
1212	.get_wptr = sdma_v2_4_ring_get_wptr,
1213	.set_wptr = sdma_v2_4_ring_set_wptr,
1214	.emit_frame_size =
1215		6 + /* sdma_v2_4_ring_emit_hdp_flush */
1216		3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
1217		6 + /* sdma_v2_4_ring_emit_pipeline_sync */
1218		12 + /* sdma_v2_4_ring_emit_vm_flush */
1219		10 + 10 + 10, /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
1220	.emit_ib_size = 7 + 6, /* sdma_v2_4_ring_emit_ib */
1221	.emit_ib = sdma_v2_4_ring_emit_ib,
1222	.emit_fence = sdma_v2_4_ring_emit_fence,
1223	.emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync,
1224	.emit_vm_flush = sdma_v2_4_ring_emit_vm_flush,
1225	.emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush,
1226	.emit_hdp_invalidate = sdma_v2_4_ring_emit_hdp_invalidate,
1227	.test_ring = sdma_v2_4_ring_test_ring,
1228	.test_ib = sdma_v2_4_ring_test_ib,
1229	.insert_nop = sdma_v2_4_ring_insert_nop,
1230	.pad_ib = sdma_v2_4_ring_pad_ib,
1231};
1232
1233static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
1234{
1235	int i;
1236
1237	for (i = 0; i < adev->sdma.num_instances; i++)
1238		adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs;
1239}
1240
1241static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = {
1242	.set = sdma_v2_4_set_trap_irq_state,
1243	.process = sdma_v2_4_process_trap_irq,
1244};
1245
1246static const struct amdgpu_irq_src_funcs sdma_v2_4_illegal_inst_irq_funcs = {
1247	.process = sdma_v2_4_process_illegal_inst_irq,
1248};
1249
1250static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev)
1251{
1252	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
1253	adev->sdma.trap_irq.funcs = &sdma_v2_4_trap_irq_funcs;
1254	adev->sdma.illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs;
1255}
1256
1257/**
1258 * sdma_v2_4_emit_copy_buffer - copy buffer using the sDMA engine
1259 *
 1260 * @ib: indirect buffer to copy to
1261 * @src_offset: src GPU address
1262 * @dst_offset: dst GPU address
1263 * @byte_count: number of bytes to xfer
1264 *
1265 * Copy GPU buffers using the DMA engine (VI).
1266 * Used by the amdgpu ttm implementation to move pages if
1267 * registered as the asic copy callback.
1268 */
1269static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib,
1270				       uint64_t src_offset,
1271				       uint64_t dst_offset,
1272				       uint32_t byte_count)
1273{
1274	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
1275		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
1276	ib->ptr[ib->length_dw++] = byte_count;
1277	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1278	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
1279	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
1280	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1281	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1282}
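/*
 * COPY_LINEAR is the same 7-dword packet used by sdma_v2_4_vm_copy_pte():
 * header, byte count, endian-swap flags, then the 64-bit source and
 * destination addresses split into lo/hi dwords. copy_num_dw = 7 in
 * sdma_v2_4_buffer_funcs below must match this encoding.
 */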
1283
1284/**
1285 * sdma_v2_4_emit_fill_buffer - fill buffer using the sDMA engine
1286 *
 1287 * @ib: indirect buffer to fill
1288 * @src_data: value to write to buffer
1289 * @dst_offset: dst GPU address
1290 * @byte_count: number of bytes to xfer
1291 *
1292 * Fill GPU buffers using the DMA engine (VI).
1293 */
1294static void sdma_v2_4_emit_fill_buffer(struct amdgpu_ib *ib,
1295				       uint32_t src_data,
1296				       uint64_t dst_offset,
1297				       uint32_t byte_count)
1298{
1299	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
1300	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1301	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1302	ib->ptr[ib->length_dw++] = src_data;
1303	ib->ptr[ib->length_dw++] = byte_count;
1304}
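/*
 * CONST_FILL example (illustrative): a 1 MiB clear fits in one 5-dword
 * packet because 0x100000 is below fill_max_bytes (0x1fffff); the buffer
 * manager splits larger requests across multiple
 * sdma_v2_4_emit_fill_buffer() calls.
 */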
1305
1306static const struct amdgpu_buffer_funcs sdma_v2_4_buffer_funcs = {
1307	.copy_max_bytes = 0x1fffff,
1308	.copy_num_dw = 7,
1309	.emit_copy_buffer = sdma_v2_4_emit_copy_buffer,
1310
1311	.fill_max_bytes = 0x1fffff,
1312	.fill_num_dw = 7,
1313	.emit_fill_buffer = sdma_v2_4_emit_fill_buffer,
1314};
1315
1316static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
1317{
1318	if (adev->mman.buffer_funcs == NULL) {
1319		adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
1320		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
1321	}
1322}
1323
1324static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
1325	.copy_pte = sdma_v2_4_vm_copy_pte,
1326	.write_pte = sdma_v2_4_vm_write_pte,
1327	.set_pte_pde = sdma_v2_4_vm_set_pte_pde,
1328};
1329
1330static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
1331{
1332	unsigned i;
1333
1334	if (adev->vm_manager.vm_pte_funcs == NULL) {
1335		adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
1336		for (i = 0; i < adev->sdma.num_instances; i++)
1337			adev->vm_manager.vm_pte_rings[i] =
1338				&adev->sdma.instance[i].ring;
1339
1340		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
1341	}
1342}
1343
1344const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
1345{
1346	.type = AMD_IP_BLOCK_TYPE_SDMA,
1347	.major = 2,
1348	.minor = 4,
1349	.rev = 0,
1350	.funcs = &sdma_v2_4_ip_funcs,
1351};