   1/*
   2 * Copyright 2013 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 * Authors: Alex Deucher
  23 */
  24
  25#include <linux/firmware.h>
  26#include <linux/module.h>
  27
  28#include "amdgpu.h"
  29#include "amdgpu_ucode.h"
  30#include "amdgpu_trace.h"
  31#include "cikd.h"
  32#include "cik.h"
  33
  34#include "bif/bif_4_1_d.h"
  35#include "bif/bif_4_1_sh_mask.h"
  36
  37#include "gca/gfx_7_2_d.h"
  38#include "gca/gfx_7_2_enum.h"
  39#include "gca/gfx_7_2_sh_mask.h"
  40
  41#include "gmc/gmc_7_1_d.h"
  42#include "gmc/gmc_7_1_sh_mask.h"
  43
  44#include "oss/oss_2_0_d.h"
  45#include "oss/oss_2_0_sh_mask.h"
  46
  47static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
  48{
  49	SDMA0_REGISTER_OFFSET,
  50	SDMA1_REGISTER_OFFSET
  51};
  52
  53static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
  54static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
  55static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
  56static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
  57static int cik_sdma_soft_reset(void *handle);
  58
  59MODULE_FIRMWARE("amdgpu/bonaire_sdma.bin");
  60MODULE_FIRMWARE("amdgpu/bonaire_sdma1.bin");
  61MODULE_FIRMWARE("amdgpu/hawaii_sdma.bin");
  62MODULE_FIRMWARE("amdgpu/hawaii_sdma1.bin");
  63MODULE_FIRMWARE("amdgpu/kaveri_sdma.bin");
  64MODULE_FIRMWARE("amdgpu/kaveri_sdma1.bin");
  65MODULE_FIRMWARE("amdgpu/kabini_sdma.bin");
  66MODULE_FIRMWARE("amdgpu/kabini_sdma1.bin");
  67MODULE_FIRMWARE("amdgpu/mullins_sdma.bin");
  68MODULE_FIRMWARE("amdgpu/mullins_sdma1.bin");
  69
  70u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
  71
  72
  73static void cik_sdma_free_microcode(struct amdgpu_device *adev)
  74{
  75	int i;
  76
  77	for (i = 0; i < adev->sdma.num_instances; i++)
  78		amdgpu_ucode_release(&adev->sdma.instance[i].fw);
  79}
  80
  81/*
  82 * sDMA - System DMA
  83 * Starting with CIK, the GPU has new asynchronous
  84 * DMA engines.  These engines are used for compute
  85 * and gfx.  There are two DMA engines (SDMA0, SDMA1)
  86 * and each one supports 1 ring buffer used for gfx
  87 * and 2 queues used for compute.
  88 *
  89 * The programming model is very similar to the CP
   90 * (ring buffer, IBs, etc.), but sDMA has its own
  91 * packet format that is different from the PM4 format
  92 * used by the CP. sDMA supports copying data, writing
  93 * embedded data, solid fills, and a number of other
  94 * things.  It also has support for tiling/detiling of
  95 * buffers.
  96 */
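/*
 * As an illustration, the minimal sDMA packet stream that writes a single
 * dword to memory (the sequence cik_sdma_ring_test_ring() emits below) is:
 *
 *   SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)
 *   lower_32_bits(dst_addr)
 *   upper_32_bits(dst_addr)
 *   1            (number of dwords to follow)
 *   0xDEADBEEF   (payload)
 */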
  97
  98/**
  99 * cik_sdma_init_microcode - load ucode images from disk
 100 *
 101 * @adev: amdgpu_device pointer
 102 *
 103 * Use the firmware interface to load the ucode images into
 104 * the driver (not loaded into hw).
 105 * Returns 0 on success, error on failure.
 106 */
 107static int cik_sdma_init_microcode(struct amdgpu_device *adev)
 108{
 109	const char *chip_name;
 110	char fw_name[30];
 111	int err = 0, i;
 112
 113	DRM_DEBUG("\n");
 114
 115	switch (adev->asic_type) {
 116	case CHIP_BONAIRE:
 117		chip_name = "bonaire";
 118		break;
 119	case CHIP_HAWAII:
 120		chip_name = "hawaii";
 121		break;
 122	case CHIP_KAVERI:
 123		chip_name = "kaveri";
 124		break;
 125	case CHIP_KABINI:
 126		chip_name = "kabini";
 127		break;
 128	case CHIP_MULLINS:
 129		chip_name = "mullins";
 130		break;
 131	default: BUG();
 132	}
 133
 134	for (i = 0; i < adev->sdma.num_instances; i++) {
 135		if (i == 0)
 136			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
 137		else
 138			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
 139		err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw, fw_name);
 140		if (err)
 141			goto out;
 142	}
 143out:
 144	if (err) {
 145		pr_err("cik_sdma: Failed to load firmware \"%s\"\n", fw_name);
 146		for (i = 0; i < adev->sdma.num_instances; i++)
 147			amdgpu_ucode_release(&adev->sdma.instance[i].fw);
 148	}
 149	return err;
 150}
 151
 152/**
 153 * cik_sdma_ring_get_rptr - get the current read pointer
 154 *
 155 * @ring: amdgpu ring pointer
 156 *
 157 * Get the current rptr from the hardware (CIK+).
 158 */
 159static uint64_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
 160{
 161	u32 rptr;
 162
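	/* the hardware rptr is a byte offset into the ring; return it in dwords */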
 163	rptr = *ring->rptr_cpu_addr;
 164
 165	return (rptr & 0x3fffc) >> 2;
 166}
 167
 168/**
 169 * cik_sdma_ring_get_wptr - get the current write pointer
 170 *
 171 * @ring: amdgpu ring pointer
 172 *
 173 * Get the current wptr from the hardware (CIK+).
 174 */
 175static uint64_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
 176{
 177	struct amdgpu_device *adev = ring->adev;
 178
 179	return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) & 0x3fffc) >> 2;
 180}
 181
 182/**
 183 * cik_sdma_ring_set_wptr - commit the write pointer
 184 *
 185 * @ring: amdgpu ring pointer
 186 *
 187 * Write the wptr back to the hardware (CIK+).
 188 */
 189static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
 190{
 191	struct amdgpu_device *adev = ring->adev;
 192
 193	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me],
 194	       (ring->wptr << 2) & 0x3fffc);
 195}
 196
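/**
 * cik_sdma_ring_insert_nop - insert NOP packets in the ring
 *
 * @ring: amdgpu ring pointer
 * @count: number of NOP dwords to insert
 *
 * If the microcode supports burst NOPs, a single NOP packet carrying
 * SDMA_NOP_COUNT(count - 1) covers the whole run; otherwise one NOP
 * packet is emitted per dword.
 */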
 197static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 198{
 199	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
 200	int i;
 201
 202	for (i = 0; i < count; i++)
 203		if (sdma && sdma->burst_nop && (i == 0))
 204			amdgpu_ring_write(ring, ring->funcs->nop |
 205					  SDMA_NOP_COUNT(count - 1));
 206		else
 207			amdgpu_ring_write(ring, ring->funcs->nop);
 208}
 209
 210/**
 211 * cik_sdma_ring_emit_ib - Schedule an IB on the DMA engine
 212 *
 213 * @ring: amdgpu ring pointer
  214 * @job: job to retrieve the vmid from
 215 * @ib: IB object to schedule
 216 * @flags: unused
 217 *
 218 * Schedule an IB in the DMA ring (CIK).
 219 */
 220static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
 221				  struct amdgpu_job *job,
 222				  struct amdgpu_ib *ib,
 223				  uint32_t flags)
 224{
 225	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 226	u32 extra_bits = vmid & 0xf;
 227
  228	/* IB packet must end on an 8 DW boundary */
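	/*
	 * The 4-dword INDIRECT_BUFFER packet below must end on an 8-dword
	 * boundary, so pad with NOPs until wptr % 8 == 4; "(4 - wptr) & 7"
	 * is exactly that pad count.
	 */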
 229	cik_sdma_ring_insert_nop(ring, (4 - lower_32_bits(ring->wptr)) & 7);
 230
 231	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
 232	amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
 233	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
 234	amdgpu_ring_write(ring, ib->length_dw);
 235
 236}
 237
 238/**
 239 * cik_sdma_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 240 *
 241 * @ring: amdgpu ring pointer
 242 *
 243 * Emit an hdp flush packet on the requested DMA ring.
 244 */
 245static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 246{
 247	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
 248			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
 249	u32 ref_and_mask;
 250
 251	if (ring->me == 0)
 252		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
 253	else
 254		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;
 255
 256	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
 257	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
 258	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
 259	amdgpu_ring_write(ring, ref_and_mask); /* reference */
 260	amdgpu_ring_write(ring, ref_and_mask); /* mask */
 261	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
 262}
 263
 264/**
 265 * cik_sdma_ring_emit_fence - emit a fence on the DMA ring
 266 *
 267 * @ring: amdgpu ring pointer
  268 * @addr: GPU address where the fence value is written
 269 * @seq: sequence number
 270 * @flags: fence related flags
 271 *
 272 * Add a DMA fence packet to the ring to write
  273 * the fence seq number and a DMA trap packet to generate
 274 * an interrupt if needed (CIK).
 275 */
 276static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
 277				     unsigned flags)
 278{
 279	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
 280	/* write the fence */
 281	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
 282	amdgpu_ring_write(ring, lower_32_bits(addr));
 283	amdgpu_ring_write(ring, upper_32_bits(addr));
 284	amdgpu_ring_write(ring, lower_32_bits(seq));
 285
 286	/* optionally write high bits as well */
 287	if (write64bit) {
 288		addr += 4;
 289		amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
 290		amdgpu_ring_write(ring, lower_32_bits(addr));
 291		amdgpu_ring_write(ring, upper_32_bits(addr));
 292		amdgpu_ring_write(ring, upper_32_bits(seq));
 293	}
 294
 295	/* generate an interrupt */
 296	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
 297}
 298
 299/**
 300 * cik_sdma_gfx_stop - stop the gfx async dma engines
 301 *
 302 * @adev: amdgpu_device pointer
 303 *
 304 * Stop the gfx async dma ring buffers (CIK).
 305 */
 306static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
 307{
 308	u32 rb_cntl;
 309	int i;
 310
 311	for (i = 0; i < adev->sdma.num_instances; i++) {
 312		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
 313		rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
 314		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
 315		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
 316	}
 317}
 318
 319/**
 320 * cik_sdma_rlc_stop - stop the compute async dma engines
 321 *
 322 * @adev: amdgpu_device pointer
 323 *
 324 * Stop the compute async dma queues (CIK).
 325 */
 326static void cik_sdma_rlc_stop(struct amdgpu_device *adev)
 327{
 328	/* XXX todo */
 329}
 330
 331/**
  332 * cik_ctx_switch_enable - enable/disable the async dma engines context switch
 333 *
 334 * @adev: amdgpu_device pointer
 335 * @enable: enable/disable the DMA MEs context switch.
 336 *
  337 * Enable or disable automatic context switching for the async dma engines (CIK).
 338 */
 339static void cik_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
 340{
 341	u32 f32_cntl, phase_quantum = 0;
 342	int i;
 343
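	/*
	 * The quantum is programmed as value << unit: value is halved (and
	 * unit bumped) until it fits the VALUE field, clamping both to the
	 * register maximum if the requested quantum is too large.
	 */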
 344	if (amdgpu_sdma_phase_quantum) {
 345		unsigned value = amdgpu_sdma_phase_quantum;
 346		unsigned unit = 0;
 347
 348		while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
 349				SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
 350			value = (value + 1) >> 1;
 351			unit++;
 352		}
 353		if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
 354			    SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
 355			value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
 356				 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
 357			unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
 358				SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
 359			WARN_ONCE(1,
 360			"clamping sdma_phase_quantum to %uK clock cycles\n",
 361				  value << unit);
 362		}
 363		phase_quantum =
 364			value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
 365			unit  << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
 366	}
 367
 368	for (i = 0; i < adev->sdma.num_instances; i++) {
 369		f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
 370		if (enable) {
 371			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
 372					AUTO_CTXSW_ENABLE, 1);
 373			if (amdgpu_sdma_phase_quantum) {
 374				WREG32(mmSDMA0_PHASE0_QUANTUM + sdma_offsets[i],
 375				       phase_quantum);
 376				WREG32(mmSDMA0_PHASE1_QUANTUM + sdma_offsets[i],
 377				       phase_quantum);
 378			}
 379		} else {
 380			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
 381					AUTO_CTXSW_ENABLE, 0);
 382		}
 383
 384		WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl);
 385	}
 386}
 387
 388/**
  389 * cik_sdma_enable - halt or unhalt the async dma engines
 390 *
 391 * @adev: amdgpu_device pointer
 392 * @enable: enable/disable the DMA MEs.
 393 *
 394 * Halt or unhalt the async dma engines (CIK).
 395 */
 396static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
 397{
 398	u32 me_cntl;
 399	int i;
 400
 401	if (!enable) {
 402		cik_sdma_gfx_stop(adev);
 403		cik_sdma_rlc_stop(adev);
 404	}
 405
 406	for (i = 0; i < adev->sdma.num_instances; i++) {
 407		me_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
 408		if (enable)
 409			me_cntl &= ~SDMA0_F32_CNTL__HALT_MASK;
 410		else
 411			me_cntl |= SDMA0_F32_CNTL__HALT_MASK;
 412		WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], me_cntl);
 413	}
 414}
 415
 416/**
 417 * cik_sdma_gfx_resume - setup and start the async dma engines
 418 *
 419 * @adev: amdgpu_device pointer
 420 *
 421 * Set up the gfx DMA ring buffers and enable them (CIK).
 422 * Returns 0 for success, error for failure.
 423 */
 424static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
 425{
 426	struct amdgpu_ring *ring;
 427	u32 rb_cntl, ib_cntl;
 428	u32 rb_bufsz;
 429	int i, j, r;
 430
 431	for (i = 0; i < adev->sdma.num_instances; i++) {
 432		ring = &adev->sdma.instance[i].ring;
 433
 434		mutex_lock(&adev->srbm_mutex);
 435		for (j = 0; j < 16; j++) {
 436			cik_srbm_select(adev, 0, 0, 0, j);
 437			/* SDMA GFX */
 438			WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
 439			WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
 440			/* XXX SDMA RLC - todo */
 441		}
 442		cik_srbm_select(adev, 0, 0, 0, 0);
 443		mutex_unlock(&adev->srbm_mutex);
 444
 445		WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
 446		       adev->gfx.config.gb_addr_config & 0x70);
 447
 448		WREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
 449		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
 450
 451		/* Set ring buffer size in dwords */
 452		rb_bufsz = order_base_2(ring->ring_size / 4);
 453		rb_cntl = rb_bufsz << 1;
 454#ifdef __BIG_ENDIAN
 455		rb_cntl |= SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK |
 456			SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK;
 457#endif
 458		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
 459
 460		/* Initialize the ring buffer's read and write pointers */
 461		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
 462		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
 463		WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
 464		WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
 465
 466		/* set the wb address whether it's enabled or not */
 467		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
 468		       upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
 469		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
 470		       ((ring->rptr_gpu_addr) & 0xFFFFFFFC));
 471
 472		rb_cntl |= SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK;
 473
 474		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
 475		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);
 476
 477		ring->wptr = 0;
 478		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
 479
 480		/* enable DMA RB */
 481		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i],
 482		       rb_cntl | SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK);
 483
 484		ib_cntl = SDMA0_GFX_IB_CNTL__IB_ENABLE_MASK;
 485#ifdef __BIG_ENDIAN
 486		ib_cntl |= SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK;
 487#endif
 488		/* enable DMA IBs */
 489		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 490	}
 491
 492	cik_sdma_enable(adev, true);
 493
 494	for (i = 0; i < adev->sdma.num_instances; i++) {
 495		ring = &adev->sdma.instance[i].ring;
 496		r = amdgpu_ring_test_helper(ring);
 497		if (r)
 498			return r;
 499	}
 500
 501	return 0;
 502}
 503
 504/**
 505 * cik_sdma_rlc_resume - setup and start the async dma engines
 506 *
 507 * @adev: amdgpu_device pointer
 508 *
 509 * Set up the compute DMA queues and enable them (CIK).
 510 * Returns 0 for success, error for failure.
 511 */
 512static int cik_sdma_rlc_resume(struct amdgpu_device *adev)
 513{
 514	/* XXX todo */
 515	return 0;
 516}
 517
 518/**
 519 * cik_sdma_load_microcode - load the sDMA ME ucode
 520 *
 521 * @adev: amdgpu_device pointer
 522 *
 523 * Loads the sDMA0/1 ucode.
 524 * Returns 0 for success, -EINVAL if the ucode is not available.
 525 */
 526static int cik_sdma_load_microcode(struct amdgpu_device *adev)
 527{
 528	const struct sdma_firmware_header_v1_0 *hdr;
 529	const __le32 *fw_data;
 530	u32 fw_size;
 531	int i, j;
 532
 533	/* halt the MEs */
 534	cik_sdma_enable(adev, false);
 535
 536	for (i = 0; i < adev->sdma.num_instances; i++) {
 537		if (!adev->sdma.instance[i].fw)
 538			return -EINVAL;
 539		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
 540		amdgpu_ucode_print_sdma_hdr(&hdr->header);
 541		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 542		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
 543		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
 544		if (adev->sdma.instance[i].feature_version >= 20)
 545			adev->sdma.instance[i].burst_nop = true;
 546		fw_data = (const __le32 *)
 547			(adev->sdma.instance[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 548		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
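		/*
		 * Writes to UCODE_DATA advance the load address: start at 0,
		 * stream the image, then leave the fw version in UCODE_ADDR.
		 */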
 549		for (j = 0; j < fw_size; j++)
 550			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
 551		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
 552	}
 553
 554	return 0;
 555}
 556
 557/**
 558 * cik_sdma_start - setup and start the async dma engines
 559 *
 560 * @adev: amdgpu_device pointer
 561 *
 562 * Set up the DMA engines and enable them (CIK).
 563 * Returns 0 for success, error for failure.
 564 */
 565static int cik_sdma_start(struct amdgpu_device *adev)
 566{
 567	int r;
 568
 569	r = cik_sdma_load_microcode(adev);
 570	if (r)
 571		return r;
 572
  573	/* halt the engine before programming */
 574	cik_sdma_enable(adev, false);
 575	/* enable sdma ring preemption */
 576	cik_ctx_switch_enable(adev, true);
 577
 578	/* start the gfx rings and rlc compute queues */
 579	r = cik_sdma_gfx_resume(adev);
 580	if (r)
 581		return r;
 582	r = cik_sdma_rlc_resume(adev);
 583	if (r)
 584		return r;
 585
 586	return 0;
 587}
 588
 589/**
 590 * cik_sdma_ring_test_ring - simple async dma engine test
 591 *
 592 * @ring: amdgpu_ring structure holding ring information
 593 *
  594 * Test the DMA engine by using it to write a
  595 * value to memory (CIK).
 596 * Returns 0 for success, error for failure.
 597 */
 598static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
 599{
 600	struct amdgpu_device *adev = ring->adev;
 601	unsigned i;
 602	unsigned index;
 603	int r;
 604	u32 tmp;
 605	u64 gpu_addr;
 606
 607	r = amdgpu_device_wb_get(adev, &index);
 608	if (r)
 609		return r;
 610
 611	gpu_addr = adev->wb.gpu_addr + (index * 4);
 612	tmp = 0xCAFEDEAD;
 613	adev->wb.wb[index] = cpu_to_le32(tmp);
 614
 615	r = amdgpu_ring_alloc(ring, 5);
 616	if (r)
 617		goto error_free_wb;
 618
 619	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
 620	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
 621	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
 622	amdgpu_ring_write(ring, 1); /* number of DWs to follow */
 623	amdgpu_ring_write(ring, 0xDEADBEEF);
 624	amdgpu_ring_commit(ring);
 625
 626	for (i = 0; i < adev->usec_timeout; i++) {
 627		tmp = le32_to_cpu(adev->wb.wb[index]);
 628		if (tmp == 0xDEADBEEF)
 629			break;
 630		udelay(1);
 631	}
 632
 633	if (i >= adev->usec_timeout)
 634		r = -ETIMEDOUT;
 635
 636error_free_wb:
 637	amdgpu_device_wb_free(adev, index);
 638	return r;
 639}
 640
 641/**
 642 * cik_sdma_ring_test_ib - test an IB on the DMA engine
 643 *
 644 * @ring: amdgpu_ring structure holding ring information
 645 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 646 *
 647 * Test a simple IB in the DMA ring (CIK).
 648 * Returns 0 on success, error on failure.
 649 */
 650static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 651{
 652	struct amdgpu_device *adev = ring->adev;
 653	struct amdgpu_ib ib;
 654	struct dma_fence *f = NULL;
 655	unsigned index;
 656	u32 tmp = 0;
 657	u64 gpu_addr;
 658	long r;
 659
 660	r = amdgpu_device_wb_get(adev, &index);
 661	if (r)
 662		return r;
 663
 664	gpu_addr = adev->wb.gpu_addr + (index * 4);
 665	tmp = 0xCAFEDEAD;
 666	adev->wb.wb[index] = cpu_to_le32(tmp);
 667	memset(&ib, 0, sizeof(ib));
 668	r = amdgpu_ib_get(adev, NULL, 256,
 669					AMDGPU_IB_POOL_DIRECT, &ib);
 670	if (r)
 671		goto err0;
 672
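	/* same linear-write packet as the ring test, but placed in an IB */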
 673	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE,
 674				SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
 675	ib.ptr[1] = lower_32_bits(gpu_addr);
 676	ib.ptr[2] = upper_32_bits(gpu_addr);
 677	ib.ptr[3] = 1;
 678	ib.ptr[4] = 0xDEADBEEF;
 679	ib.length_dw = 5;
 680	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 681	if (r)
 682		goto err1;
 683
 684	r = dma_fence_wait_timeout(f, false, timeout);
 685	if (r == 0) {
 686		r = -ETIMEDOUT;
 687		goto err1;
 688	} else if (r < 0) {
 689		goto err1;
 690	}
 691	tmp = le32_to_cpu(adev->wb.wb[index]);
 692	if (tmp == 0xDEADBEEF)
 693		r = 0;
 694	else
 695		r = -EINVAL;
 696
 697err1:
 698	amdgpu_ib_free(adev, &ib, NULL);
 699	dma_fence_put(f);
 700err0:
 701	amdgpu_device_wb_free(adev, index);
 702	return r;
 703}
 704
 705/**
 706 * cik_sdma_vm_copy_pte - update PTEs by copying them from the GART
 707 *
 708 * @ib: indirect buffer to fill with commands
 709 * @pe: addr of the page entry
 710 * @src: src addr to copy from
 711 * @count: number of page entries to update
 712 *
 713 * Update PTEs by copying them from the GART using sDMA (CIK).
 714 */
 715static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
 716				 uint64_t pe, uint64_t src,
 717				 unsigned count)
 718{
 719	unsigned bytes = count * 8;
 720
 721	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
 722		SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
 723	ib->ptr[ib->length_dw++] = bytes;
 724	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
 725	ib->ptr[ib->length_dw++] = lower_32_bits(src);
 726	ib->ptr[ib->length_dw++] = upper_32_bits(src);
 727	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
 728	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 729}
 730
 731/**
 732 * cik_sdma_vm_write_pte - update PTEs by writing them manually
 733 *
 734 * @ib: indirect buffer to fill with commands
 735 * @pe: addr of the page entry
  736 * @value: value to write into the page entries
 737 * @count: number of page entries to update
 738 * @incr: increase next addr by incr bytes
 739 *
 740 * Update PTEs by writing them manually using sDMA (CIK).
 741 */
 742static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
 743				  uint64_t value, unsigned count,
 744				  uint32_t incr)
 745{
 746	unsigned ndw = count * 2;
 747
 748	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
 749		SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
 750	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
 751	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 752	ib->ptr[ib->length_dw++] = ndw;
 753	for (; ndw > 0; ndw -= 2) {
 754		ib->ptr[ib->length_dw++] = lower_32_bits(value);
 755		ib->ptr[ib->length_dw++] = upper_32_bits(value);
 756		value += incr;
 757	}
 758}
 759
 760/**
 761 * cik_sdma_vm_set_pte_pde - update the page tables using sDMA
 762 *
 763 * @ib: indirect buffer to fill with commands
 764 * @pe: addr of the page entry
 765 * @addr: dst addr to write into pe
 766 * @count: number of page entries to update
 767 * @incr: increase next addr by incr bytes
 768 * @flags: access flags
 769 *
 770 * Update the page tables using sDMA (CIK).
 771 */
 772static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
 773				    uint64_t addr, unsigned count,
 774				    uint32_t incr, uint64_t flags)
 775{
 776	/* for physically contiguous pages (vram) */
 777	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
 778	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
 779	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 780	ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
 781	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
 782	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
 783	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
 784	ib->ptr[ib->length_dw++] = incr; /* increment size */
 785	ib->ptr[ib->length_dw++] = 0;
 786	ib->ptr[ib->length_dw++] = count; /* number of entries */
 787}
 788
 789/**
 790 * cik_sdma_ring_pad_ib - pad the IB to the required number of dw
 791 *
 792 * @ring: amdgpu_ring structure holding ring information
 793 * @ib: indirect buffer to fill with padding
 794 *
 795 */
 796static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
 797{
 798	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
 799	u32 pad_count;
 800	int i;
 801
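	/* (-len) & 7 == (8 - len % 8) % 8: pad to a multiple of 8 dwords */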
 802	pad_count = (-ib->length_dw) & 7;
 803	for (i = 0; i < pad_count; i++)
 804		if (sdma && sdma->burst_nop && (i == 0))
 805			ib->ptr[ib->length_dw++] =
 806					SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0) |
 807					SDMA_NOP_COUNT(pad_count - 1);
 808		else
 809			ib->ptr[ib->length_dw++] =
 810					SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
 811}
 812
 813/**
 814 * cik_sdma_ring_emit_pipeline_sync - sync the pipeline
 815 *
 816 * @ring: amdgpu_ring pointer
 817 *
 818 * Make sure all previous operations are completed (CIK).
 819 */
 820static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 821{
 822	uint32_t seq = ring->fence_drv.sync_seq;
 823	uint64_t addr = ring->fence_drv.gpu_addr;
 824
 825	/* wait for idle */
 826	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0,
 827					    SDMA_POLL_REG_MEM_EXTRA_OP(0) |
 828					    SDMA_POLL_REG_MEM_EXTRA_FUNC(3) | /* equal */
 829					    SDMA_POLL_REG_MEM_EXTRA_M));
 830	amdgpu_ring_write(ring, addr & 0xfffffffc);
 831	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
 832	amdgpu_ring_write(ring, seq); /* reference */
 833	amdgpu_ring_write(ring, 0xffffffff); /* mask */
 834	amdgpu_ring_write(ring, (0xfff << 16) | 4); /* retry count, poll interval */
 835}
 836
 837/**
 838 * cik_sdma_ring_emit_vm_flush - cik vm flush using sDMA
 839 *
 840 * @ring: amdgpu_ring pointer
 841 * @vmid: vmid number to use
  842 * @pd_addr: page directory base address
 843 *
 844 * Update the page table base and flush the VM TLB
 845 * using sDMA (CIK).
 846 */
 847static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
 848					unsigned vmid, uint64_t pd_addr)
 849{
 850	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
 851			  SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
 852
 853	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
 854
 855	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
 856	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
 857	amdgpu_ring_write(ring, 0);
 858	amdgpu_ring_write(ring, 0); /* reference */
 859	amdgpu_ring_write(ring, 0); /* mask */
 860	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
 861}
 862
 863static void cik_sdma_ring_emit_wreg(struct amdgpu_ring *ring,
 864				    uint32_t reg, uint32_t val)
 865{
 866	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
 867	amdgpu_ring_write(ring, reg);
 868	amdgpu_ring_write(ring, val);
 869}
 870
 871static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
 872				 bool enable)
 873{
 874	u32 orig, data;
 875
 876	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
 877		WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
 878		WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
 879	} else {
 880		orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET);
 881		data |= 0xff000000;
 882		if (data != orig)
 883			WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data);
 884
 885		orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET);
 886		data |= 0xff000000;
 887		if (data != orig)
 888			WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data);
 889	}
 890}
 891
 892static void cik_enable_sdma_mgls(struct amdgpu_device *adev,
 893				 bool enable)
 894{
 895	u32 orig, data;
 896
 897	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
 898		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
 899		data |= 0x100;
 900		if (orig != data)
 901			WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
 902
 903		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
 904		data |= 0x100;
 905		if (orig != data)
 906			WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
 907	} else {
 908		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
 909		data &= ~0x100;
 910		if (orig != data)
 911			WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
 912
 913		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
 914		data &= ~0x100;
 915		if (orig != data)
 916			WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
 917	}
 918}
 919
 920static int cik_sdma_early_init(void *handle)
 921{
 922	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 923	int r;
 924
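	/* CIK parts expose two sDMA instances (SDMA0 and SDMA1) */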
 925	adev->sdma.num_instances = SDMA_MAX_INSTANCE;
 926
 927	r = cik_sdma_init_microcode(adev);
 928	if (r)
 929		return r;
 930
 931	cik_sdma_set_ring_funcs(adev);
 932	cik_sdma_set_irq_funcs(adev);
 933	cik_sdma_set_buffer_funcs(adev);
 934	cik_sdma_set_vm_pte_funcs(adev);
 935
 936	return 0;
 937}
 938
 939static int cik_sdma_sw_init(void *handle)
 940{
 941	struct amdgpu_ring *ring;
 942	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 943	int r, i;
 944
 945	/* SDMA trap event */
 946	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224,
 947			      &adev->sdma.trap_irq);
 948	if (r)
 949		return r;
 950
 951	/* SDMA Privileged inst */
 952	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241,
 953			      &adev->sdma.illegal_inst_irq);
 954	if (r)
 955		return r;
 956
 957	/* SDMA Privileged inst */
 958	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 247,
 959			      &adev->sdma.illegal_inst_irq);
 960	if (r)
 961		return r;
 962
 963	for (i = 0; i < adev->sdma.num_instances; i++) {
 964		ring = &adev->sdma.instance[i].ring;
 965		ring->ring_obj = NULL;
 966		sprintf(ring->name, "sdma%d", i);
 967		r = amdgpu_ring_init(adev, ring, 1024,
 968				     &adev->sdma.trap_irq,
 969				     (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
 970				     AMDGPU_SDMA_IRQ_INSTANCE1,
 971				     AMDGPU_RING_PRIO_DEFAULT, NULL);
 972		if (r)
 973			return r;
 974	}
 975
 976	return r;
 977}
 978
 979static int cik_sdma_sw_fini(void *handle)
 980{
 981	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 982	int i;
 983
 984	for (i = 0; i < adev->sdma.num_instances; i++)
 985		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 986
 987	cik_sdma_free_microcode(adev);
 988	return 0;
 989}
 990
 991static int cik_sdma_hw_init(void *handle)
 992{
 993	int r;
 994	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 995
 996	r = cik_sdma_start(adev);
 997	if (r)
 998		return r;
 999
1000	return r;
1001}
1002
1003static int cik_sdma_hw_fini(void *handle)
1004{
1005	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1006
1007	cik_ctx_switch_enable(adev, false);
1008	cik_sdma_enable(adev, false);
1009
1010	return 0;
1011}
1012
1013static int cik_sdma_suspend(void *handle)
1014{
1015	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1016
1017	return cik_sdma_hw_fini(adev);
1018}
1019
1020static int cik_sdma_resume(void *handle)
1021{
1022	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1023
1024	cik_sdma_soft_reset(handle);
1025
1026	return cik_sdma_hw_init(adev);
1027}
1028
1029static bool cik_sdma_is_idle(void *handle)
1030{
1031	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1032	u32 tmp = RREG32(mmSRBM_STATUS2);
1033
1034	if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
1035				SRBM_STATUS2__SDMA1_BUSY_MASK))
 1036		return false;
1037
1038	return true;
1039}
1040
1041static int cik_sdma_wait_for_idle(void *handle)
1042{
1043	unsigned i;
1044	u32 tmp;
1045	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1046
1047	for (i = 0; i < adev->usec_timeout; i++) {
1048		tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
1049				SRBM_STATUS2__SDMA1_BUSY_MASK);
1050
1051		if (!tmp)
1052			return 0;
1053		udelay(1);
1054	}
1055	return -ETIMEDOUT;
1056}
1057
1058static int cik_sdma_soft_reset(void *handle)
1059{
1060	u32 srbm_soft_reset = 0;
1061	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1062	u32 tmp;
1063
1064	/* sdma0 */
1065	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
1066	tmp |= SDMA0_F32_CNTL__HALT_MASK;
1067	WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
1068	srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
1069
1070	/* sdma1 */
1071	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
1072	tmp |= SDMA0_F32_CNTL__HALT_MASK;
1073	WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
1074	srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
1075
1076	if (srbm_soft_reset) {
1077		tmp = RREG32(mmSRBM_SOFT_RESET);
1078		tmp |= srbm_soft_reset;
1079		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1080		WREG32(mmSRBM_SOFT_RESET, tmp);
1081		tmp = RREG32(mmSRBM_SOFT_RESET);
1082
1083		udelay(50);
1084
1085		tmp &= ~srbm_soft_reset;
1086		WREG32(mmSRBM_SOFT_RESET, tmp);
1087		tmp = RREG32(mmSRBM_SOFT_RESET);
1088
1089		/* Wait a little for things to settle down */
1090		udelay(50);
1091	}
1092
1093	return 0;
1094}
1095
1096static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
1097				       struct amdgpu_irq_src *src,
1098				       unsigned type,
1099				       enum amdgpu_interrupt_state state)
1100{
1101	u32 sdma_cntl;
1102
1103	switch (type) {
1104	case AMDGPU_SDMA_IRQ_INSTANCE0:
1105		switch (state) {
1106		case AMDGPU_IRQ_STATE_DISABLE:
1107			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
1108			sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
1109			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
1110			break;
1111		case AMDGPU_IRQ_STATE_ENABLE:
1112			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
1113			sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
1114			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
1115			break;
1116		default:
1117			break;
1118		}
1119		break;
1120	case AMDGPU_SDMA_IRQ_INSTANCE1:
1121		switch (state) {
1122		case AMDGPU_IRQ_STATE_DISABLE:
1123			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
1124			sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
1125			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
1126			break;
1127		case AMDGPU_IRQ_STATE_ENABLE:
1128			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
1129			sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
1130			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
1131			break;
1132		default:
1133			break;
1134		}
1135		break;
1136	default:
1137		break;
1138	}
1139	return 0;
1140}
1141
1142static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
1143				     struct amdgpu_irq_src *source,
1144				     struct amdgpu_iv_entry *entry)
1145{
1146	u8 instance_id, queue_id;
1147
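	/* ring_id carries the SDMA instance in bits [1:0] and the queue in bits [3:2] */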
1148	instance_id = (entry->ring_id & 0x3) >> 0;
1149	queue_id = (entry->ring_id & 0xc) >> 2;
1150	DRM_DEBUG("IH: SDMA trap\n");
1151	switch (instance_id) {
1152	case 0:
1153		switch (queue_id) {
1154		case 0:
1155			amdgpu_fence_process(&adev->sdma.instance[0].ring);
1156			break;
1157		case 1:
1158			/* XXX compute */
1159			break;
1160		case 2:
1161			/* XXX compute */
1162			break;
1163		}
1164		break;
1165	case 1:
1166		switch (queue_id) {
1167		case 0:
1168			amdgpu_fence_process(&adev->sdma.instance[1].ring);
1169			break;
1170		case 1:
1171			/* XXX compute */
1172			break;
1173		case 2:
1174			/* XXX compute */
1175			break;
1176		}
1177		break;
1178	}
1179
1180	return 0;
1181}
1182
1183static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev,
1184					     struct amdgpu_irq_src *source,
1185					     struct amdgpu_iv_entry *entry)
1186{
1187	u8 instance_id;
1188
1189	DRM_ERROR("Illegal instruction in SDMA command stream\n");
1190	instance_id = (entry->ring_id & 0x3) >> 0;
1191	drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
1192	return 0;
1193}
1194
1195static int cik_sdma_set_clockgating_state(void *handle,
1196					  enum amd_clockgating_state state)
1197{
1198	bool gate = false;
1199	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1200
1201	if (state == AMD_CG_STATE_GATE)
1202		gate = true;
1203
1204	cik_enable_sdma_mgcg(adev, gate);
1205	cik_enable_sdma_mgls(adev, gate);
1206
1207	return 0;
1208}
1209
1210static int cik_sdma_set_powergating_state(void *handle,
1211					  enum amd_powergating_state state)
1212{
1213	return 0;
1214}
1215
1216static const struct amd_ip_funcs cik_sdma_ip_funcs = {
1217	.name = "cik_sdma",
1218	.early_init = cik_sdma_early_init,
1219	.late_init = NULL,
1220	.sw_init = cik_sdma_sw_init,
1221	.sw_fini = cik_sdma_sw_fini,
1222	.hw_init = cik_sdma_hw_init,
1223	.hw_fini = cik_sdma_hw_fini,
1224	.suspend = cik_sdma_suspend,
1225	.resume = cik_sdma_resume,
1226	.is_idle = cik_sdma_is_idle,
1227	.wait_for_idle = cik_sdma_wait_for_idle,
1228	.soft_reset = cik_sdma_soft_reset,
1229	.set_clockgating_state = cik_sdma_set_clockgating_state,
1230	.set_powergating_state = cik_sdma_set_powergating_state,
1231};
1232
1233static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
1234	.type = AMDGPU_RING_TYPE_SDMA,
1235	.align_mask = 0xf,
1236	.nop = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0),
1237	.support_64bit_ptrs = false,
1238	.get_rptr = cik_sdma_ring_get_rptr,
1239	.get_wptr = cik_sdma_ring_get_wptr,
1240	.set_wptr = cik_sdma_ring_set_wptr,
1241	.emit_frame_size =
1242		6 + /* cik_sdma_ring_emit_hdp_flush */
1243		3 + /* hdp invalidate */
1244		6 + /* cik_sdma_ring_emit_pipeline_sync */
1245		CIK_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* cik_sdma_ring_emit_vm_flush */
1246		9 + 9 + 9, /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
1247	.emit_ib_size = 7 + 4, /* cik_sdma_ring_emit_ib */
1248	.emit_ib = cik_sdma_ring_emit_ib,
1249	.emit_fence = cik_sdma_ring_emit_fence,
1250	.emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync,
1251	.emit_vm_flush = cik_sdma_ring_emit_vm_flush,
1252	.emit_hdp_flush = cik_sdma_ring_emit_hdp_flush,
1253	.test_ring = cik_sdma_ring_test_ring,
1254	.test_ib = cik_sdma_ring_test_ib,
1255	.insert_nop = cik_sdma_ring_insert_nop,
1256	.pad_ib = cik_sdma_ring_pad_ib,
1257	.emit_wreg = cik_sdma_ring_emit_wreg,
1258};
1259
1260static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
1261{
1262	int i;
1263
1264	for (i = 0; i < adev->sdma.num_instances; i++) {
1265		adev->sdma.instance[i].ring.funcs = &cik_sdma_ring_funcs;
1266		adev->sdma.instance[i].ring.me = i;
1267	}
1268}
1269
1270static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
1271	.set = cik_sdma_set_trap_irq_state,
1272	.process = cik_sdma_process_trap_irq,
1273};
1274
1275static const struct amdgpu_irq_src_funcs cik_sdma_illegal_inst_irq_funcs = {
1276	.process = cik_sdma_process_illegal_inst_irq,
1277};
1278
1279static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
1280{
1281	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
1282	adev->sdma.trap_irq.funcs = &cik_sdma_trap_irq_funcs;
1283	adev->sdma.illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs;
1284}
1285
1286/**
1287 * cik_sdma_emit_copy_buffer - copy buffer using the sDMA engine
1288 *
1289 * @ib: indirect buffer to copy to
1290 * @src_offset: src GPU address
1291 * @dst_offset: dst GPU address
1292 * @byte_count: number of bytes to xfer
1293 * @tmz: is this a secure operation
1294 *
1295 * Copy GPU buffers using the DMA engine (CIK).
1296 * Used by the amdgpu ttm implementation to move pages if
1297 * registered as the asic copy callback.
1298 */
1299static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib,
1300				      uint64_t src_offset,
1301				      uint64_t dst_offset,
1302				      uint32_t byte_count,
1303				      bool tmz)
1304{
1305	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
1306	ib->ptr[ib->length_dw++] = byte_count;
1307	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1308	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
1309	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
1310	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1311	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1312}
1313
1314/**
1315 * cik_sdma_emit_fill_buffer - fill buffer using the sDMA engine
1316 *
1317 * @ib: indirect buffer to fill
1318 * @src_data: value to write to buffer
1319 * @dst_offset: dst GPU address
1320 * @byte_count: number of bytes to xfer
1321 *
1322 * Fill GPU buffers using the DMA engine (CIK).
1323 */
1324static void cik_sdma_emit_fill_buffer(struct amdgpu_ib *ib,
1325				      uint32_t src_data,
1326				      uint64_t dst_offset,
1327				      uint32_t byte_count)
1328{
1329	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0);
1330	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1331	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1332	ib->ptr[ib->length_dw++] = src_data;
1333	ib->ptr[ib->length_dw++] = byte_count;
1334}
1335
1336static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = {
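	/* 0x1fffff = 2^21 - 1, the largest byte count one CIK sDMA packet can carry */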
1337	.copy_max_bytes = 0x1fffff,
1338	.copy_num_dw = 7,
1339	.emit_copy_buffer = cik_sdma_emit_copy_buffer,
1340
1341	.fill_max_bytes = 0x1fffff,
1342	.fill_num_dw = 5,
1343	.emit_fill_buffer = cik_sdma_emit_fill_buffer,
1344};
1345
1346static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
1347{
1348	adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
1349	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
1350}
1351
1352static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
1353	.copy_pte_num_dw = 7,
1354	.copy_pte = cik_sdma_vm_copy_pte,
1355
1356	.write_pte = cik_sdma_vm_write_pte,
1357	.set_pte_pde = cik_sdma_vm_set_pte_pde,
1358};
1359
1360static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
1361{
1362	unsigned i;
1363
1364	adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
1365	for (i = 0; i < adev->sdma.num_instances; i++) {
1366		adev->vm_manager.vm_pte_scheds[i] =
1367			&adev->sdma.instance[i].ring.sched;
1368	}
1369	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
1370}
1371
1372const struct amdgpu_ip_block_version cik_sdma_ip_block =
1373{
1374	.type = AMD_IP_BLOCK_TYPE_SDMA,
1375	.major = 2,
1376	.minor = 0,
1377	.rev = 0,
1378	.funcs = &cik_sdma_ip_funcs,
1379};
v4.6
   1/*
   2 * Copyright 2013 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 * Authors: Alex Deucher
  23 */
 
  24#include <linux/firmware.h>
  25#include <drm/drmP.h>
 
  26#include "amdgpu.h"
  27#include "amdgpu_ucode.h"
  28#include "amdgpu_trace.h"
  29#include "cikd.h"
  30#include "cik.h"
  31
  32#include "bif/bif_4_1_d.h"
  33#include "bif/bif_4_1_sh_mask.h"
  34
  35#include "gca/gfx_7_2_d.h"
  36#include "gca/gfx_7_2_enum.h"
  37#include "gca/gfx_7_2_sh_mask.h"
  38
  39#include "gmc/gmc_7_1_d.h"
  40#include "gmc/gmc_7_1_sh_mask.h"
  41
  42#include "oss/oss_2_0_d.h"
  43#include "oss/oss_2_0_sh_mask.h"
  44
  45static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
  46{
  47	SDMA0_REGISTER_OFFSET,
  48	SDMA1_REGISTER_OFFSET
  49};
  50
  51static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
  52static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
  53static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
  54static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
 
  55
  56MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
  57MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
  58MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
  59MODULE_FIRMWARE("radeon/hawaii_sdma1.bin");
  60MODULE_FIRMWARE("radeon/kaveri_sdma.bin");
  61MODULE_FIRMWARE("radeon/kaveri_sdma1.bin");
  62MODULE_FIRMWARE("radeon/kabini_sdma.bin");
  63MODULE_FIRMWARE("radeon/kabini_sdma1.bin");
  64MODULE_FIRMWARE("radeon/mullins_sdma.bin");
  65MODULE_FIRMWARE("radeon/mullins_sdma1.bin");
  66
  67u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
  68
 
 
 
 
 
 
 
 
 
  69/*
  70 * sDMA - System DMA
  71 * Starting with CIK, the GPU has new asynchronous
  72 * DMA engines.  These engines are used for compute
  73 * and gfx.  There are two DMA engines (SDMA0, SDMA1)
  74 * and each one supports 1 ring buffer used for gfx
  75 * and 2 queues used for compute.
  76 *
  77 * The programming model is very similar to the CP
  78 * (ring buffer, IBs, etc.), but sDMA has it's own
  79 * packet format that is different from the PM4 format
  80 * used by the CP. sDMA supports copying data, writing
  81 * embedded data, solid fills, and a number of other
  82 * things.  It also has support for tiling/detiling of
  83 * buffers.
  84 */
  85
  86/**
  87 * cik_sdma_init_microcode - load ucode images from disk
  88 *
  89 * @adev: amdgpu_device pointer
  90 *
  91 * Use the firmware interface to load the ucode images into
  92 * the driver (not loaded into hw).
  93 * Returns 0 on success, error on failure.
  94 */
  95static int cik_sdma_init_microcode(struct amdgpu_device *adev)
  96{
  97	const char *chip_name;
  98	char fw_name[30];
  99	int err = 0, i;
 100
 101	DRM_DEBUG("\n");
 102
 103	switch (adev->asic_type) {
 104	case CHIP_BONAIRE:
 105		chip_name = "bonaire";
 106		break;
 107	case CHIP_HAWAII:
 108		chip_name = "hawaii";
 109		break;
 110	case CHIP_KAVERI:
 111		chip_name = "kaveri";
 112		break;
 113	case CHIP_KABINI:
 114		chip_name = "kabini";
 115		break;
 116	case CHIP_MULLINS:
 117		chip_name = "mullins";
 118		break;
 119	default: BUG();
 120	}
 121
 122	for (i = 0; i < adev->sdma.num_instances; i++) {
 123		if (i == 0)
 124			snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
 125		else
 126			snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
 127		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
 128		if (err)
 129			goto out;
 130		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
 131	}
 132out:
 133	if (err) {
 134		printk(KERN_ERR
 135		       "cik_sdma: Failed to load firmware \"%s\"\n",
 136		       fw_name);
 137		for (i = 0; i < adev->sdma.num_instances; i++) {
 138			release_firmware(adev->sdma.instance[i].fw);
 139			adev->sdma.instance[i].fw = NULL;
 140		}
 141	}
 142	return err;
 143}
 144
 145/**
 146 * cik_sdma_ring_get_rptr - get the current read pointer
 147 *
 148 * @ring: amdgpu ring pointer
 149 *
 150 * Get the current rptr from the hardware (CIK+).
 151 */
 152static uint32_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
 153{
 154	u32 rptr;
 155
 156	rptr = ring->adev->wb.wb[ring->rptr_offs];
 157
 158	return (rptr & 0x3fffc) >> 2;
 159}
 160
 161/**
 162 * cik_sdma_ring_get_wptr - get the current write pointer
 163 *
 164 * @ring: amdgpu ring pointer
 165 *
 166 * Get the current wptr from the hardware (CIK+).
 167 */
 168static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
 169{
 170	struct amdgpu_device *adev = ring->adev;
 171	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
 172
 173	return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
 174}
 175
 176/**
 177 * cik_sdma_ring_set_wptr - commit the write pointer
 178 *
 179 * @ring: amdgpu ring pointer
 180 *
 181 * Write the wptr back to the hardware (CIK+).
 182 */
 183static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
 184{
 185	struct amdgpu_device *adev = ring->adev;
 186	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
 187
 188	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
 
 189}
 190
 191static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 192{
 193	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
 194	int i;
 195
 196	for (i = 0; i < count; i++)
 197		if (sdma && sdma->burst_nop && (i == 0))
 198			amdgpu_ring_write(ring, ring->nop |
 199					  SDMA_NOP_COUNT(count - 1));
 200		else
 201			amdgpu_ring_write(ring, ring->nop);
 202}
 203
 204/**
 205 * cik_sdma_ring_emit_ib - Schedule an IB on the DMA engine
 206 *
 207 * @ring: amdgpu ring pointer
 
 208 * @ib: IB object to schedule
 
 209 *
 210 * Schedule an IB in the DMA ring (CIK).
 211 */
 212static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
 213			   struct amdgpu_ib *ib)
 
 
 214{
 215	u32 extra_bits = ib->vm_id & 0xf;
 216	u32 next_rptr = ring->wptr + 5;
 217
 218	while ((next_rptr & 7) != 4)
 219		next_rptr++;
 220
 221	next_rptr += 4;
 222	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
 223	amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
 224	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
 225	amdgpu_ring_write(ring, 1); /* number of DWs to follow */
 226	amdgpu_ring_write(ring, next_rptr);
 227
 228	/* IB packet must end on a 8 DW boundary */
 229	cik_sdma_ring_insert_nop(ring, (12 - (ring->wptr & 7)) % 8);
 230
 231	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
 232	amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
 233	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
 234	amdgpu_ring_write(ring, ib->length_dw);
 235
 236}
 237
 238/**
 239 * cik_sdma_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 240 *
 241 * @ring: amdgpu ring pointer
 242 *
 243 * Emit an hdp flush packet on the requested DMA ring.
 244 */
 245static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 246{
 247	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
 248			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
 249	u32 ref_and_mask;
 250
 251	if (ring == &ring->adev->sdma.instance[0].ring)
 252		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
 253	else
 254		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;
 255
 256	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
 257	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
 258	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
 259	amdgpu_ring_write(ring, ref_and_mask); /* reference */
 260	amdgpu_ring_write(ring, ref_and_mask); /* mask */
 261	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
 262}
 263
 264static void cik_sdma_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
 265{
 266	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
 267	amdgpu_ring_write(ring, mmHDP_DEBUG0);
 268	amdgpu_ring_write(ring, 1);
 269}
 270
 271/**
 272 * cik_sdma_ring_emit_fence - emit a fence on the DMA ring
 273 *
 274 * @ring: amdgpu ring pointer
 275 * @fence: amdgpu fence object
 
 
 276 *
 277 * Add a DMA fence packet to the ring to write
 278 * the fence seq number and DMA trap packet to generate
 279 * an interrupt if needed (CIK).
 280 */
 281static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
 282				     unsigned flags)
 283{
 284	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
 285	/* write the fence */
 286	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
 287	amdgpu_ring_write(ring, lower_32_bits(addr));
 288	amdgpu_ring_write(ring, upper_32_bits(addr));
 289	amdgpu_ring_write(ring, lower_32_bits(seq));
 290
 291	/* optionally write high bits as well */
 292	if (write64bit) {
 293		addr += 4;
 294		amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
 295		amdgpu_ring_write(ring, lower_32_bits(addr));
 296		amdgpu_ring_write(ring, upper_32_bits(addr));
 297		amdgpu_ring_write(ring, upper_32_bits(seq));
 298	}
 299
 300	/* generate an interrupt */
 301	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
 302}
 303
 304/**
 305 * cik_sdma_gfx_stop - stop the gfx async dma engines
 306 *
 307 * @adev: amdgpu_device pointer
 308 *
 309 * Stop the gfx async dma ring buffers (CIK).
 310 */
 311static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
 312{
 313	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
 314	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
 315	u32 rb_cntl;
 316	int i;
 317
 318	if ((adev->mman.buffer_funcs_ring == sdma0) ||
 319	    (adev->mman.buffer_funcs_ring == sdma1))
 320		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
 321
 322	for (i = 0; i < adev->sdma.num_instances; i++) {
 323		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
 324		rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
 325		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
 326		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
 327	}
 328	sdma0->ready = false;
 329	sdma1->ready = false;
 330}
 331
 332/**
 333 * cik_sdma_rlc_stop - stop the compute async dma engines
 334 *
 335 * @adev: amdgpu_device pointer
 336 *
 337 * Stop the compute async dma queues (CIK).
 338 */
 339static void cik_sdma_rlc_stop(struct amdgpu_device *adev)
 340{
 341	/* XXX todo */
 342}
 343
 344/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 345 * cik_sdma_enable - stop the async dma engines
 346 *
 347 * @adev: amdgpu_device pointer
 348 * @enable: enable/disable the DMA MEs.
 349 *
 350 * Halt or unhalt the async dma engines (CIK).
 351 */
 352static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
 353{
 354	u32 me_cntl;
 355	int i;
 356
 357	if (enable == false) {
 358		cik_sdma_gfx_stop(adev);
 359		cik_sdma_rlc_stop(adev);
 360	}
 361
 362	for (i = 0; i < adev->sdma.num_instances; i++) {
 363		me_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
 364		if (enable)
 365			me_cntl &= ~SDMA0_F32_CNTL__HALT_MASK;
 366		else
 367			me_cntl |= SDMA0_F32_CNTL__HALT_MASK;
 368		WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], me_cntl);
 369	}
 370}
 371
 372/**
 373 * cik_sdma_gfx_resume - setup and start the async dma engines
 374 *
 375 * @adev: amdgpu_device pointer
 376 *
 377 * Set up the gfx DMA ring buffers and enable them (CIK).
 378 * Returns 0 for success, error for failure.
 379 */
 380static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
 381{
 382	struct amdgpu_ring *ring;
 383	u32 rb_cntl, ib_cntl;
 384	u32 rb_bufsz;
 385	u32 wb_offset;
 386	int i, j, r;
 387
 388	for (i = 0; i < adev->sdma.num_instances; i++) {
 389		ring = &adev->sdma.instance[i].ring;
 390		wb_offset = (ring->rptr_offs * 4);
 391
 392		mutex_lock(&adev->srbm_mutex);
 393		for (j = 0; j < 16; j++) {
 394			cik_srbm_select(adev, 0, 0, 0, j);
 395			/* SDMA GFX */
 396			WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
 397			WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
 398			/* XXX SDMA RLC - todo */
 399		}
 400		cik_srbm_select(adev, 0, 0, 0, 0);
 401		mutex_unlock(&adev->srbm_mutex);
 402
 403		WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
 404		       adev->gfx.config.gb_addr_config & 0x70);
 405
 406		WREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
 407		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
 408
 409		/* Set ring buffer size in dwords */
 410		rb_bufsz = order_base_2(ring->ring_size / 4);
 411		rb_cntl = rb_bufsz << 1;
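		/* RB_SIZE is a log2 value and sits just above the RB_ENABLE
		 * bit in RB_CNTL, hence the shift by one. */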
 412#ifdef __BIG_ENDIAN
 413		rb_cntl |= SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK |
 414			SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK;
 415#endif
 416		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
 417
 418		/* Initialize the ring buffer's read and write pointers */
 419		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
 420		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
 421
 422		/* set the wb address whether it's enabled or not */
 423		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
 424		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
 425		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
 426		       ((adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
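		/* With RPTR writeback the engine DMA-writes its read pointer
		 * to this address, letting the driver poll ring progress from
		 * memory instead of an MMIO register. */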
 427
 428		rb_cntl |= SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK;
 429
 430		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
 431		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);
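		/* The ring base must be 256-byte aligned: RB_BASE holds
		 * address bits 39:8 and RB_BASE_HI the bits above. */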
 432
 433		ring->wptr = 0;
 434		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
 435
 436		/* enable DMA RB */
 437		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i],
 438		       rb_cntl | SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK);
 439
 440		ib_cntl = SDMA0_GFX_IB_CNTL__IB_ENABLE_MASK;
 441#ifdef __BIG_ENDIAN
 442		ib_cntl |= SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK;
 443#endif
 444		/* enable DMA IBs */
 445		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 446
 447		ring->ready = true;
 448
 449		r = amdgpu_ring_test_ring(ring);
 450		if (r) {
 451			ring->ready = false;
 452			return r;
 453		}
 454
 455		if (adev->mman.buffer_funcs_ring == ring)
 456			amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
 457	}
 458
 459	return 0;
 460}
 461
 462/**
 463 * cik_sdma_rlc_resume - setup and start the async dma engines
 464 *
 465 * @adev: amdgpu_device pointer
 466 *
 467 * Set up the compute DMA queues and enable them (CIK).
 468 * Returns 0 for success, error for failure.
 469 */
 470static int cik_sdma_rlc_resume(struct amdgpu_device *adev)
 471{
 472	/* XXX todo */
 473	return 0;
 474}
 475
 476/**
 477 * cik_sdma_load_microcode - load the sDMA ME ucode
 478 *
 479 * @adev: amdgpu_device pointer
 480 *
 481 * Loads the sDMA0/1 ucode.
 482 * Returns 0 for success, -EINVAL if the ucode is not available.
 483 */
 484static int cik_sdma_load_microcode(struct amdgpu_device *adev)
 485{
 486	const struct sdma_firmware_header_v1_0 *hdr;
 487	const __le32 *fw_data;
 488	u32 fw_size;
 489	int i, j;
 490
 491	/* halt the MEs */
 492	cik_sdma_enable(adev, false);
 493
 494	for (i = 0; i < adev->sdma.num_instances; i++) {
 495		if (!adev->sdma.instance[i].fw)
 496			return -EINVAL;
 497		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
 498		amdgpu_ucode_print_sdma_hdr(&hdr->header);
 499		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 500		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
 501		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
 502		if (adev->sdma.instance[i].feature_version >= 20)
 503			adev->sdma.instance[i].burst_nop = true;
 504		fw_data = (const __le32 *)
 505			(adev->sdma.instance[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 506		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
 507		for (j = 0; j < fw_size; j++)
 508			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
 509		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
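		/* Upload protocol: reset UCODE_ADDR to 0, stream the image
		 * through the (presumably auto-incrementing) UCODE_DATA port,
		 * then leave the firmware version in UCODE_ADDR. */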
 510	}
 511
 512	return 0;
 513}
 514
 515/**
 516 * cik_sdma_start - setup and start the async dma engines
 517 *
 518 * @adev: amdgpu_device pointer
 519 *
 520 * Set up the DMA engines and enable them (CIK).
 521 * Returns 0 for success, error for failure.
 522 */
 523static int cik_sdma_start(struct amdgpu_device *adev)
 524{
 525	int r;
 526
 527	r = cik_sdma_load_microcode(adev);
 528	if (r)
 529		return r;
 530
 531	/* unhalt the MEs */
 532	cik_sdma_enable(adev, true);
 533
 534	/* start the gfx rings and rlc compute queues */
 535	r = cik_sdma_gfx_resume(adev);
 536	if (r)
 537		return r;
 538	r = cik_sdma_rlc_resume(adev);
 539	if (r)
 540		return r;
 541
 542	return 0;
 543}
 544
 545/**
 546 * cik_sdma_ring_test_ring - simple async dma engine test
 547 *
 548 * @ring: amdgpu_ring structure holding ring information
 549 *
 * Test the DMA engine by using it to write a value to
 * memory (CIK).
 552 * Returns 0 for success, error for failure.
 553 */
 554static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
 555{
 556	struct amdgpu_device *adev = ring->adev;
 557	unsigned i;
 558	unsigned index;
 559	int r;
 560	u32 tmp;
 561	u64 gpu_addr;
 562
 563	r = amdgpu_wb_get(adev, &index);
 564	if (r) {
 565		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
 566		return r;
 567	}
 568
 569	gpu_addr = adev->wb.gpu_addr + (index * 4);
 570	tmp = 0xCAFEDEAD;
 571	adev->wb.wb[index] = cpu_to_le32(tmp);
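	/* Seed the writeback slot with a sentinel; the packet below asks
	 * SDMA to overwrite it with 0xDEADBEEF, and the poll loop watches
	 * for that change. */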
 572
 573	r = amdgpu_ring_alloc(ring, 5);
 574	if (r) {
 575		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
 576		amdgpu_wb_free(adev, index);
 577		return r;
 578	}
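	/* 5-dword SDMA linear WRITE packet: header, dst addr lo/hi,
	 * dword count, then the payload itself. */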
 579	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
 580	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
 581	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
 582	amdgpu_ring_write(ring, 1); /* number of DWs to follow */
 583	amdgpu_ring_write(ring, 0xDEADBEEF);
 584	amdgpu_ring_commit(ring);
 585
 586	for (i = 0; i < adev->usec_timeout; i++) {
 587		tmp = le32_to_cpu(adev->wb.wb[index]);
 588		if (tmp == 0xDEADBEEF)
 589			break;
 590		DRM_UDELAY(1);
 591	}
 592
 593	if (i < adev->usec_timeout) {
 594		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
 595	} else {
 596		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
 597			  ring->idx, tmp);
 598		r = -EINVAL;
 599	}
 600	amdgpu_wb_free(adev, index);
 601
 602	return r;
 603}
 604
 605/**
 606 * cik_sdma_ring_test_ib - test an IB on the DMA engine
 607 *
 608 * @ring: amdgpu_ring structure holding ring information
 609 *
 610 * Test a simple IB in the DMA ring (CIK).
 611 * Returns 0 on success, error on failure.
 612 */
 613static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
 614{
 615	struct amdgpu_device *adev = ring->adev;
 616	struct amdgpu_ib ib;
 617	struct fence *f = NULL;
 618	unsigned i;
 619	unsigned index;
 620	int r;
 621	u32 tmp = 0;
 622	u64 gpu_addr;
 623
 624	r = amdgpu_wb_get(adev, &index);
 625	if (r) {
 626		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
 627		return r;
 628	}
 629
 630	gpu_addr = adev->wb.gpu_addr + (index * 4);
 631	tmp = 0xCAFEDEAD;
 632	adev->wb.wb[index] = cpu_to_le32(tmp);
 633	memset(&ib, 0, sizeof(ib));
 634	r = amdgpu_ib_get(adev, NULL, 256, &ib);
 635	if (r) {
 636		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
 637		goto err0;
 638	}
 639
 640	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
 641	ib.ptr[1] = lower_32_bits(gpu_addr);
 642	ib.ptr[2] = upper_32_bits(gpu_addr);
 643	ib.ptr[3] = 1;
 644	ib.ptr[4] = 0xDEADBEEF;
 645	ib.length_dw = 5;
 646	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 647	if (r)
 648		goto err1;
 649
 650	r = fence_wait(f, false);
 651	if (r) {
 652		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
 653		goto err1;
 654	}
 655	for (i = 0; i < adev->usec_timeout; i++) {
 656		tmp = le32_to_cpu(adev->wb.wb[index]);
 657		if (tmp == 0xDEADBEEF)
 658			break;
 659		DRM_UDELAY(1);
 660	}
 661	if (i < adev->usec_timeout) {
 662		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
 663			 ring->idx, i);
 664		goto err1;
 665	} else {
 666		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
 667		r = -EINVAL;
 668	}
 669
err1:
	amdgpu_ib_free(adev, &ib, NULL);
	/* only one reference is held on f; put it exactly once */
	fence_put(f);
 674err0:
 675	amdgpu_wb_free(adev, index);
 676	return r;
 677}
 678
 679/**
 * cik_sdma_vm_copy_pte - update PTEs by copying them from the GART
 681 *
 682 * @ib: indirect buffer to fill with commands
 683 * @pe: addr of the page entry
 684 * @src: src addr to copy from
 685 * @count: number of page entries to update
 686 *
 687 * Update PTEs by copying them from the GART using sDMA (CIK).
 688 */
 689static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
 690				 uint64_t pe, uint64_t src,
 691				 unsigned count)
 692{
 693	while (count) {
 694		unsigned bytes = count * 8;
 695		if (bytes > 0x1FFFF8)
 696			bytes = 0x1FFFF8;
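		/* 0x1FFFF8 is the packet's maximum byte count rounded down
		 * to a whole number of 8-byte PTEs. */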
 697
 698		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
 699			SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
 700		ib->ptr[ib->length_dw++] = bytes;
 701		ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
 702		ib->ptr[ib->length_dw++] = lower_32_bits(src);
 703		ib->ptr[ib->length_dw++] = upper_32_bits(src);
 704		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
 705		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 706
 707		pe += bytes;
 708		src += bytes;
 709		count -= bytes / 8;
 710	}
 711}
 712
 713/**
 * cik_sdma_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pages_addr: DMA addresses to use for mapping, may be NULL
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 719 * @count: number of page entries to update
 720 * @incr: increase next addr by incr bytes
 721 * @flags: access flags
 722 *
 723 * Update PTEs by writing them manually using sDMA (CIK).
 724 */
 725static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib,
 726				  const dma_addr_t *pages_addr, uint64_t pe,
 727				  uint64_t addr, unsigned count,
 728				  uint32_t incr, uint32_t flags)
 729{
 730	uint64_t value;
 731	unsigned ndw;
 732
 733	while (count) {
 734		ndw = count * 2;
 735		if (ndw > 0xFFFFE)
 736			ndw = 0xFFFFE;
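		/* Each PTE takes two dwords; clamp to the largest even count
		 * the WRITE packet header can carry. */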
 737
 738		/* for non-physically contiguous pages (system) */
 739		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
 740			SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
 742		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 743		ib->ptr[ib->length_dw++] = ndw;
 744		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
 745			value = amdgpu_vm_map_gart(pages_addr, addr);
 746			addr += incr;
 747			value |= flags;
 748			ib->ptr[ib->length_dw++] = value;
 749			ib->ptr[ib->length_dw++] = upper_32_bits(value);
 750		}
 751	}
 752}
 753
 754/**
 * cik_sdma_vm_set_pte_pde - update the page tables using sDMA
 756 *
 757 * @ib: indirect buffer to fill with commands
 758 * @pe: addr of the page entry
 759 * @addr: dst addr to write into pe
 760 * @count: number of page entries to update
 761 * @incr: increase next addr by incr bytes
 762 * @flags: access flags
 763 *
 764 * Update the page tables using sDMA (CIK).
 765 */
 766static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib,
 767				    uint64_t pe,
 768				    uint64_t addr, unsigned count,
 769				    uint32_t incr, uint32_t flags)
 770{
 771	uint64_t value;
 772	unsigned ndw;
 773
 774	while (count) {
 775		ndw = count;
 776		if (ndw > 0x7FFFF)
 777			ndw = 0x7FFFF;
 778
 779		if (flags & AMDGPU_PTE_VALID)
 780			value = addr;
 781		else
 782			value = 0;
 783
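		/* One GENERATE_PTE_PDE packet writes ndw entries from a dst
		 * address, mask, starting value and per-entry increment. */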
 784		/* for physically contiguous pages (vram) */
 785		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = flags; /* mask */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = lower_32_bits(value); /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
 792		ib->ptr[ib->length_dw++] = incr; /* increment size */
 793		ib->ptr[ib->length_dw++] = 0;
 794		ib->ptr[ib->length_dw++] = ndw; /* number of entries */
 795
 796		pe += ndw * 8;
 797		addr += ndw * incr;
 798		count -= ndw;
 799	}
 800}
 801
 802/**
 * cik_sdma_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs to a multiple of 8 dwords (CIK).
 */
 808static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
 809{
 810	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
 811	u32 pad_count;
 812	int i;
 813
 814	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
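	/* SDMA requires IBs to be a multiple of 8 dwords; when the ucode
	 * supports it (feature_version >= 20), a single burst NOP encodes
	 * the whole padding length in its header. */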
 815	for (i = 0; i < pad_count; i++)
 816		if (sdma && sdma->burst_nop && (i == 0))
 817			ib->ptr[ib->length_dw++] =
 818					SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0) |
 819					SDMA_NOP_COUNT(pad_count - 1);
 820		else
 821			ib->ptr[ib->length_dw++] =
 822					SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
 823}
 824
 825/**
 826 * cik_sdma_ring_emit_pipeline_sync - sync the pipeline
 827 *
 828 * @ring: amdgpu_ring pointer
 829 *
 830 * Make sure all previous operations are completed (CIK).
 831 */
 832static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 833{
 834	uint32_t seq = ring->fence_drv.sync_seq;
 835	uint64_t addr = ring->fence_drv.gpu_addr;
 836
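	/* POLL_REG_MEM in memory mode busy-waits until the fence location
	 * equals the last synced sequence number. */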
 837	/* wait for idle */
 838	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0,
 839					    SDMA_POLL_REG_MEM_EXTRA_OP(0) |
 840					    SDMA_POLL_REG_MEM_EXTRA_FUNC(3) | /* equal */
 841					    SDMA_POLL_REG_MEM_EXTRA_M));
 842	amdgpu_ring_write(ring, addr & 0xfffffffc);
 843	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
 844	amdgpu_ring_write(ring, seq); /* reference */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
 846	amdgpu_ring_write(ring, (0xfff << 16) | 4); /* retry count, poll interval */
 847}
 848
 849/**
 850 * cik_sdma_ring_emit_vm_flush - cik vm flush using sDMA
 851 *
 852 * @ring: amdgpu_ring pointer
 * @vm_id: VMID of the VM to flush
 * @pd_addr: address of the page directory
 854 *
 855 * Update the page table base and flush the VM TLB
 856 * using sDMA (CIK).
 857 */
 858static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
 859					unsigned vm_id, uint64_t pd_addr)
 860{
 861	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
 862			  SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
 863
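	/* VMIDs 0-7 and 8-15 have their page table base registers in two
	 * separate ranges, and the base address field is in 4 KiB units,
	 * hence pd_addr >> 12 below. */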
 864	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
 865	if (vm_id < 8) {
 866		amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
 867	} else {
 868		amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
 869	}
 870	amdgpu_ring_write(ring, pd_addr >> 12);
 871
 872	/* flush TLB */
 873	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
 874	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
 875	amdgpu_ring_write(ring, 1 << vm_id);
 876
 877	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
 878	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
 879	amdgpu_ring_write(ring, 0);
 880	amdgpu_ring_write(ring, 0); /* reference */
 881	amdgpu_ring_write(ring, 0); /* mask */
 882	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
 883}
 884
 885static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
 886				 bool enable)
 887{
 888	u32 orig, data;
 889
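	/* Magic values carried over from the original CIK programming:
	 * 0x100 appears to enable medium grain clock gating, while the
	 * 0xff000000 override bits force the clocks on (gating off). */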
 890	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
 891		WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
 892		WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
 893	} else {
 894		orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET);
 895		data |= 0xff000000;
 896		if (data != orig)
 897			WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data);
 898
 899		orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET);
 900		data |= 0xff000000;
 901		if (data != orig)
 902			WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data);
 903	}
 904}
 905
 906static void cik_enable_sdma_mgls(struct amdgpu_device *adev,
 907				 bool enable)
 908{
 909	u32 orig, data;
 910
 911	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
 912		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
 913		data |= 0x100;
 914		if (orig != data)
 915			WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
 916
 917		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
 918		data |= 0x100;
 919		if (orig != data)
 920			WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
 921	} else {
 922		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
 923		data &= ~0x100;
 924		if (orig != data)
 925			WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
 926
 927		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
 928		data &= ~0x100;
 929		if (orig != data)
 930			WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
 931	}
 932}
 933
 934static int cik_sdma_early_init(void *handle)
 935{
 936	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 937
 938	adev->sdma.num_instances = SDMA_MAX_INSTANCE;
 939
 940	cik_sdma_set_ring_funcs(adev);
 941	cik_sdma_set_irq_funcs(adev);
 942	cik_sdma_set_buffer_funcs(adev);
 943	cik_sdma_set_vm_pte_funcs(adev);
 944
 945	return 0;
 946}
 947
 948static int cik_sdma_sw_init(void *handle)
 949{
 950	struct amdgpu_ring *ring;
 951	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 952	int r, i;
 953
 954	r = cik_sdma_init_microcode(adev);
 955	if (r) {
 956		DRM_ERROR("Failed to load sdma firmware!\n");
 957		return r;
 958	}
 959
 960	/* SDMA trap event */
 961	r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
 962	if (r)
 963		return r;
 964
 965	/* SDMA Privileged inst */
 966	r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
 967	if (r)
 968		return r;
 969
 970	/* SDMA Privileged inst */
 971	r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
 972	if (r)
 973		return r;
 974
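	/* One gfx ring per SDMA instance: 256 KiB of ring space, padded
	 * with SDMA NOP packets and aligned to 16 dwords (0xf mask). */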
 975	for (i = 0; i < adev->sdma.num_instances; i++) {
 976		ring = &adev->sdma.instance[i].ring;
 977		ring->ring_obj = NULL;
 978		sprintf(ring->name, "sdma%d", i);
 979		r = amdgpu_ring_init(adev, ring, 256 * 1024,
 980				     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
 981				     &adev->sdma.trap_irq,
 982				     (i == 0) ?
 983				     AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
 984				     AMDGPU_RING_TYPE_SDMA);
 985		if (r)
 986			return r;
 987	}
 988
 989	return r;
 990}
 991
 992static int cik_sdma_sw_fini(void *handle)
 993{
 994	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 995	int i;
 996
 997	for (i = 0; i < adev->sdma.num_instances; i++)
 998		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 999
1000	return 0;
1001}
1002
1003static int cik_sdma_hw_init(void *handle)
1004{
1005	int r;
1006	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1007
1008	r = cik_sdma_start(adev);
1009	if (r)
1010		return r;
1011
1012	return r;
1013}
1014
1015static int cik_sdma_hw_fini(void *handle)
1016{
1017	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1018
1019	cik_sdma_enable(adev, false);
1020
1021	return 0;
1022}
1023
1024static int cik_sdma_suspend(void *handle)
1025{
1026	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1027
1028	return cik_sdma_hw_fini(adev);
1029}
1030
1031static int cik_sdma_resume(void *handle)
1032{
1033	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1034
1035	return cik_sdma_hw_init(adev);
1036}
1037
1038static bool cik_sdma_is_idle(void *handle)
1039{
1040	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1041	u32 tmp = RREG32(mmSRBM_STATUS2);
1042
	if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
		   SRBM_STATUS2__SDMA1_BUSY_MASK))
		return false;
1046
1047	return true;
1048}
1049
1050static int cik_sdma_wait_for_idle(void *handle)
1051{
1052	unsigned i;
1053	u32 tmp;
1054	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1055
1056	for (i = 0; i < adev->usec_timeout; i++) {
1057		tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
1058				SRBM_STATUS2__SDMA1_BUSY_MASK);
1059
1060		if (!tmp)
1061			return 0;
1062		udelay(1);
1063	}
1064	return -ETIMEDOUT;
1065}
1066
1067static void cik_sdma_print_status(void *handle)
1068{
1069	int i, j;
1070	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1071
1072	dev_info(adev->dev, "CIK SDMA registers\n");
1073	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
1074		 RREG32(mmSRBM_STATUS2));
1075	for (i = 0; i < adev->sdma.num_instances; i++) {
1076		dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
1077			 i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
1078		dev_info(adev->dev, "  SDMA%d_ME_CNTL=0x%08X\n",
1079			 i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]));
1080		dev_info(adev->dev, "  SDMA%d_CNTL=0x%08X\n",
1081			 i, RREG32(mmSDMA0_CNTL + sdma_offsets[i]));
1082		dev_info(adev->dev, "  SDMA%d_SEM_INCOMPLETE_TIMER_CNTL=0x%08X\n",
1083			 i, RREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i]));
1084		dev_info(adev->dev, "  SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n",
1085			 i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i]));
1086		dev_info(adev->dev, "  SDMA%d_GFX_IB_CNTL=0x%08X\n",
1087			 i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]));
1088		dev_info(adev->dev, "  SDMA%d_GFX_RB_CNTL=0x%08X\n",
1089			 i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]));
1090		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR=0x%08X\n",
1091			 i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i]));
1092		dev_info(adev->dev, "  SDMA%d_GFX_RB_WPTR=0x%08X\n",
1093			 i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i]));
1094		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n",
1095			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i]));
1096		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n",
1097			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i]));
1098		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE=0x%08X\n",
1099			 i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
1100		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
1101			 i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
1102		dev_info(adev->dev, "  SDMA%d_TILING_CONFIG=0x%08X\n",
1103			 i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i]));
1104		mutex_lock(&adev->srbm_mutex);
1105		for (j = 0; j < 16; j++) {
1106			cik_srbm_select(adev, 0, 0, 0, j);
1107			dev_info(adev->dev, "  VM %d:\n", j);
1108			dev_info(adev->dev, "  SDMA0_GFX_VIRTUAL_ADDR=0x%08X\n",
1109				 RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i]));
1110			dev_info(adev->dev, "  SDMA0_GFX_APE1_CNTL=0x%08X\n",
1111				 RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i]));
1112		}
1113		cik_srbm_select(adev, 0, 0, 0, 0);
1114		mutex_unlock(&adev->srbm_mutex);
1115	}
1116}
1117
1118static int cik_sdma_soft_reset(void *handle)
1119{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Keep the busy bits in their own variable: tmp is reused for the
	 * F32_CNTL read-modify-write below, so testing tmp again for SDMA1
	 * would inspect the wrong register. */
	u32 busy = RREG32(mmSRBM_STATUS2);
	u32 tmp;

	if (busy & SRBM_STATUS2__SDMA_BUSY_MASK) {
		/* sdma0 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
		tmp |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
	}
	if (busy & SRBM_STATUS2__SDMA1_BUSY_MASK) {
		/* sdma1 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
		tmp |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
	}
1138
1139	if (srbm_soft_reset) {
1140		cik_sdma_print_status((void *)adev);
1141
1142		tmp = RREG32(mmSRBM_SOFT_RESET);
1143		tmp |= srbm_soft_reset;
1144		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1145		WREG32(mmSRBM_SOFT_RESET, tmp);
1146		tmp = RREG32(mmSRBM_SOFT_RESET);
1147
1148		udelay(50);
1149
1150		tmp &= ~srbm_soft_reset;
1151		WREG32(mmSRBM_SOFT_RESET, tmp);
1152		tmp = RREG32(mmSRBM_SOFT_RESET);
1153
1154		/* Wait a little for things to settle down */
1155		udelay(50);
1156
1157		cik_sdma_print_status((void *)adev);
1158	}
1159
1160	return 0;
1161}
1162
1163static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
1164				       struct amdgpu_irq_src *src,
1165				       unsigned type,
1166				       enum amdgpu_interrupt_state state)
1167{
1168	u32 sdma_cntl;
1169
1170	switch (type) {
1171	case AMDGPU_SDMA_IRQ_TRAP0:
1172		switch (state) {
1173		case AMDGPU_IRQ_STATE_DISABLE:
1174			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
1175			sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
1176			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
1177			break;
1178		case AMDGPU_IRQ_STATE_ENABLE:
1179			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
1180			sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
1181			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
1182			break;
1183		default:
1184			break;
1185		}
1186		break;
1187	case AMDGPU_SDMA_IRQ_TRAP1:
1188		switch (state) {
1189		case AMDGPU_IRQ_STATE_DISABLE:
1190			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
1191			sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
1192			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
1193			break;
1194		case AMDGPU_IRQ_STATE_ENABLE:
1195			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
1196			sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
1197			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
1198			break;
1199		default:
1200			break;
1201		}
1202		break;
1203	default:
1204		break;
1205	}
1206	return 0;
1207}
1208
1209static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
1210				     struct amdgpu_irq_src *source,
1211				     struct amdgpu_iv_entry *entry)
1212{
1213	u8 instance_id, queue_id;
1214
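	/* ring_id packs the SDMA instance in bits 1:0 and the queue within
	 * that instance in bits 3:2. */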
1215	instance_id = (entry->ring_id & 0x3) >> 0;
1216	queue_id = (entry->ring_id & 0xc) >> 2;
1217	DRM_DEBUG("IH: SDMA trap\n");
1218	switch (instance_id) {
1219	case 0:
1220		switch (queue_id) {
1221		case 0:
1222			amdgpu_fence_process(&adev->sdma.instance[0].ring);
1223			break;
1224		case 1:
1225			/* XXX compute */
1226			break;
1227		case 2:
1228			/* XXX compute */
1229			break;
1230		}
1231		break;
1232	case 1:
1233		switch (queue_id) {
1234		case 0:
1235			amdgpu_fence_process(&adev->sdma.instance[1].ring);
1236			break;
1237		case 1:
1238			/* XXX compute */
1239			break;
1240		case 2:
1241			/* XXX compute */
1242			break;
1243		}
1244		break;
1245	}
1246
1247	return 0;
1248}
1249
1250static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev,
1251					     struct amdgpu_irq_src *source,
1252					     struct amdgpu_iv_entry *entry)
1253{
1254	DRM_ERROR("Illegal instruction in SDMA command stream\n");
1255	schedule_work(&adev->reset_work);
1256	return 0;
1257}
1258
1259static int cik_sdma_set_clockgating_state(void *handle,
1260					  enum amd_clockgating_state state)
1261{
1262	bool gate = false;
1263	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1264
1265	if (state == AMD_CG_STATE_GATE)
1266		gate = true;
1267
1268	cik_enable_sdma_mgcg(adev, gate);
1269	cik_enable_sdma_mgls(adev, gate);
1270
1271	return 0;
1272}
1273
1274static int cik_sdma_set_powergating_state(void *handle,
1275					  enum amd_powergating_state state)
1276{
1277	return 0;
1278}
1279
1280const struct amd_ip_funcs cik_sdma_ip_funcs = {
1281	.early_init = cik_sdma_early_init,
1282	.late_init = NULL,
1283	.sw_init = cik_sdma_sw_init,
1284	.sw_fini = cik_sdma_sw_fini,
1285	.hw_init = cik_sdma_hw_init,
1286	.hw_fini = cik_sdma_hw_fini,
1287	.suspend = cik_sdma_suspend,
1288	.resume = cik_sdma_resume,
1289	.is_idle = cik_sdma_is_idle,
1290	.wait_for_idle = cik_sdma_wait_for_idle,
1291	.soft_reset = cik_sdma_soft_reset,
1292	.print_status = cik_sdma_print_status,
1293	.set_clockgating_state = cik_sdma_set_clockgating_state,
1294	.set_powergating_state = cik_sdma_set_powergating_state,
1295};
1296
1297static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
1298	.get_rptr = cik_sdma_ring_get_rptr,
1299	.get_wptr = cik_sdma_ring_get_wptr,
1300	.set_wptr = cik_sdma_ring_set_wptr,
1301	.parse_cs = NULL,
1302	.emit_ib = cik_sdma_ring_emit_ib,
1303	.emit_fence = cik_sdma_ring_emit_fence,
1304	.emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync,
1305	.emit_vm_flush = cik_sdma_ring_emit_vm_flush,
1306	.emit_hdp_flush = cik_sdma_ring_emit_hdp_flush,
1307	.emit_hdp_invalidate = cik_sdma_ring_emit_hdp_invalidate,
1308	.test_ring = cik_sdma_ring_test_ring,
1309	.test_ib = cik_sdma_ring_test_ib,
1310	.insert_nop = cik_sdma_ring_insert_nop,
1311	.pad_ib = cik_sdma_ring_pad_ib,
1312};
1313
1314static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
1315{
1316	int i;
1317
1318	for (i = 0; i < adev->sdma.num_instances; i++)
1319		adev->sdma.instance[i].ring.funcs = &cik_sdma_ring_funcs;
1320}
1321
1322static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
1323	.set = cik_sdma_set_trap_irq_state,
1324	.process = cik_sdma_process_trap_irq,
1325};
1326
1327static const struct amdgpu_irq_src_funcs cik_sdma_illegal_inst_irq_funcs = {
1328	.process = cik_sdma_process_illegal_inst_irq,
1329};
1330
1331static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
1332{
1333	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
1334	adev->sdma.trap_irq.funcs = &cik_sdma_trap_irq_funcs;
1335	adev->sdma.illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs;
1336}
1337
1338/**
1339 * cik_sdma_emit_copy_buffer - copy buffer using the sDMA engine
1340 *
1341 * @ring: amdgpu_ring structure holding ring information
1342 * @src_offset: src GPU address
1343 * @dst_offset: dst GPU address
1344 * @byte_count: number of bytes to xfer
1345 *
1346 * Copy GPU buffers using the DMA engine (CIK).
1347 * Used by the amdgpu ttm implementation to move pages if
1348 * registered as the asic copy callback.
1349 */
1350static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib,
1351				      uint64_t src_offset,
1352				      uint64_t dst_offset,
1353				      uint32_t byte_count)
1354{
1355	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
1356	ib->ptr[ib->length_dw++] = byte_count;
1357	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1358	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
1359	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
1360	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1361	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1362}
1363
1364/**
1365 * cik_sdma_emit_fill_buffer - fill buffer using the sDMA engine
1366 *
1367 * @ring: amdgpu_ring structure holding ring information
1368 * @src_data: value to write to buffer
1369 * @dst_offset: dst GPU address
1370 * @byte_count: number of bytes to xfer
1371 *
1372 * Fill GPU buffers using the DMA engine (CIK).
1373 */
1374static void cik_sdma_emit_fill_buffer(struct amdgpu_ib *ib,
1375				      uint32_t src_data,
1376				      uint64_t dst_offset,
1377				      uint32_t byte_count)
1378{
1379	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0);
1380	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1381	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1382	ib->ptr[ib->length_dw++] = src_data;
1383	ib->ptr[ib->length_dw++] = byte_count;
1384}
1385
1386static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = {
1387	.copy_max_bytes = 0x1fffff,
1388	.copy_num_dw = 7,
1389	.emit_copy_buffer = cik_sdma_emit_copy_buffer,
1390
1391	.fill_max_bytes = 0x1fffff,
1392	.fill_num_dw = 5,
1393	.emit_fill_buffer = cik_sdma_emit_fill_buffer,
1394};
1395
1396static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
1397{
1398	if (adev->mman.buffer_funcs == NULL) {
1399		adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
1400		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
1401	}
1402}
1403
1404static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
1405	.copy_pte = cik_sdma_vm_copy_pte,
1406	.write_pte = cik_sdma_vm_write_pte,
1407	.set_pte_pde = cik_sdma_vm_set_pte_pde,
1408};
1409
1410static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
1411{
1412	unsigned i;
1413
1414	if (adev->vm_manager.vm_pte_funcs == NULL) {
1415		adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
1416		for (i = 0; i < adev->sdma.num_instances; i++)
1417			adev->vm_manager.vm_pte_rings[i] =
1418				&adev->sdma.instance[i].ring;
1419
1420		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
1421	}
1422}