v6.2
  1/*
  2 * Copyright 2015 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 * Authors: Alex Deucher
 23 */
 24
 25#include "amdgpu.h"
 26#include "amdgpu_trace.h"
 27#include "si.h"
 28#include "sid.h"
 29
 30const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
 31{
 32	DMA0_REGISTER_OFFSET,
 33	DMA1_REGISTER_OFFSET
 34};
 35
 36static void si_dma_set_ring_funcs(struct amdgpu_device *adev);
 37static void si_dma_set_buffer_funcs(struct amdgpu_device *adev);
 38static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
 39static void si_dma_set_irq_funcs(struct amdgpu_device *adev);
 40
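/**
 * si_dma_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Return the current rptr from the CPU-visible copy that the engine
 * updates via writeback (SI).
 */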
 41static uint64_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
 42{
 43	return *ring->rptr_cpu_addr;
 44}
 45
 46static uint64_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
 47{
 48	struct amdgpu_device *adev = ring->adev;
 49	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
 50
 51	return (RREG32(DMA_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
 52}
 53
 54static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
 55{
 56	struct amdgpu_device *adev = ring->adev;
 57	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
 58
 59	WREG32(DMA_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
 60}
 61
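/**
 * si_dma_ring_emit_ib - emit an IB on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @job: job the IB belongs to, used to look up the vmid
 * @ib: IB to schedule
 * @flags: unused here
 *
 * Pad with NOPs so the INDIRECT_BUFFER packet ends on an 8 DW boundary,
 * then emit the IB base address and length (SI).
 */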
 62static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
 63				struct amdgpu_job *job,
 64				struct amdgpu_ib *ib,
 65				uint32_t flags)
 66{
 67	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 68	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
 69	 * Pad as necessary with NOPs.
 70	 */
 71	while ((lower_32_bits(ring->wptr) & 7) != 5)
 72		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
 73	amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vmid, 0));
 74	amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
 75	amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
 76
 77}
 78
 79/**
 80 * si_dma_ring_emit_fence - emit a fence on the DMA ring
 81 *
 82 * @ring: amdgpu ring pointer
 83 * @addr: address the fence value is written to
 84 * @seq: sequence number
 85 * @flags: fence related flags
 86 *
 87 * Add a DMA fence packet to the ring to write
 88 * the fence seq number, followed by a DMA trap packet to generate
 89 * an interrupt if needed (SI).
 90 */
 91static void si_dma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
 92				      unsigned flags)
 93{
 94
 95	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
 96	/* write the fence */
 97	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
 98	amdgpu_ring_write(ring, addr & 0xfffffffc);
 99	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
100	amdgpu_ring_write(ring, seq);
101	/* optionally write high bits as well */
102	if (write64bit) {
103		addr += 4;
104		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
105		amdgpu_ring_write(ring, addr & 0xfffffffc);
106		amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
107		amdgpu_ring_write(ring, upper_32_bits(seq));
108	}
109	/* generate an interrupt */
110	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0, 0));
111}
112
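/**
 * si_dma_stop - stop the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Unset the TTM buffer funcs helper and clear DMA_RB_ENABLE for each
 * DMA instance (SI).
 */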
113static void si_dma_stop(struct amdgpu_device *adev)
114{
115	u32 rb_cntl;
116	unsigned i;
117
118	amdgpu_sdma_unset_buffer_funcs_helper(adev);
119
120	for (i = 0; i < adev->sdma.num_instances; i++) {
121		/* dma0 */
122		rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
123		rb_cntl &= ~DMA_RB_ENABLE;
124		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
125	}
126}
127
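/**
 * si_dma_start - set up and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Program the ring buffer size, base and rptr writeback address, enable
 * IBs and the ring buffer for each DMA instance, then run the ring test.
 * Returns 0 for success, error for failure.
 */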
128static int si_dma_start(struct amdgpu_device *adev)
129{
130	struct amdgpu_ring *ring;
131	u32 rb_cntl, dma_cntl, ib_cntl, rb_bufsz;
132	int i, r;
133	uint64_t rptr_addr;
134
135	for (i = 0; i < adev->sdma.num_instances; i++) {
136		ring = &adev->sdma.instance[i].ring;
137
138		WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
139		WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
140
141		/* Set ring buffer size in dwords */
142		rb_bufsz = order_base_2(ring->ring_size / 4);
143		rb_cntl = rb_bufsz << 1;
144#ifdef __BIG_ENDIAN
145		rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
146#endif
147		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
148
149		/* Initialize the ring buffer's read and write pointers */
150		WREG32(DMA_RB_RPTR + sdma_offsets[i], 0);
151		WREG32(DMA_RB_WPTR + sdma_offsets[i], 0);
152
153		rptr_addr = ring->rptr_gpu_addr;
154
155		WREG32(DMA_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
156		WREG32(DMA_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);
157
158		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
159
160		WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
161
162		/* enable DMA IBs */
163		ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
164#ifdef __BIG_ENDIAN
165		ib_cntl |= DMA_IB_SWAP_ENABLE;
166#endif
167		WREG32(DMA_IB_CNTL + sdma_offsets[i], ib_cntl);
168
169		dma_cntl = RREG32(DMA_CNTL + sdma_offsets[i]);
170		dma_cntl &= ~CTXEMPTY_INT_ENABLE;
171		WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl);
172
173		ring->wptr = 0;
174		WREG32(DMA_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
175		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
176
177		ring->sched.ready = true;
178
179		r = amdgpu_ring_test_helper(ring);
180		if (r)
181			return r;
182
183		if (adev->mman.buffer_funcs_ring == ring)
184			amdgpu_ttm_set_buffer_funcs_status(adev, true);
185	}
186
187	return 0;
188}
189
190/**
191 * si_dma_ring_test_ring - simple async dma engine test
192 *
193 * @ring: amdgpu_ring structure holding ring information
194 *
195 * Test the DMA engine by using it to write a
196 * value to memory (SI).
197 * Returns 0 for success, error for failure.
198 */
199static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
200{
201	struct amdgpu_device *adev = ring->adev;
202	unsigned i;
203	unsigned index;
204	int r;
205	u32 tmp;
206	u64 gpu_addr;
207
208	r = amdgpu_device_wb_get(adev, &index);
209	if (r)
210		return r;
211
212	gpu_addr = adev->wb.gpu_addr + (index * 4);
213	tmp = 0xCAFEDEAD;
214	adev->wb.wb[index] = cpu_to_le32(tmp);
215
216	r = amdgpu_ring_alloc(ring, 4);
217	if (r)
218		goto error_free_wb;
219
220	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1));
221	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
222	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
223	amdgpu_ring_write(ring, 0xDEADBEEF);
224	amdgpu_ring_commit(ring);
225
226	for (i = 0; i < adev->usec_timeout; i++) {
227		tmp = le32_to_cpu(adev->wb.wb[index]);
228		if (tmp == 0xDEADBEEF)
229			break;
230		udelay(1);
231	}
232
233	if (i >= adev->usec_timeout)
234		r = -ETIMEDOUT;
235
236error_free_wb:
237	amdgpu_device_wb_free(adev, index);
238	return r;
239}
240
241/**
242 * si_dma_ring_test_ib - test an IB on the DMA engine
243 *
244 * @ring: amdgpu_ring structure holding ring information
245 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
246 *
247 * Test a simple IB in the DMA ring (SI).
248 * Returns 0 on success, error on failure.
249 */
250static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
251{
252	struct amdgpu_device *adev = ring->adev;
253	struct amdgpu_ib ib;
254	struct dma_fence *f = NULL;
255	unsigned index;
256	u32 tmp = 0;
257	u64 gpu_addr;
258	long r;
259
260	r = amdgpu_device_wb_get(adev, &index);
261	if (r)
262		return r;
263
264	gpu_addr = adev->wb.gpu_addr + (index * 4);
265	tmp = 0xCAFEDEAD;
266	adev->wb.wb[index] = cpu_to_le32(tmp);
267	memset(&ib, 0, sizeof(ib));
268	r = amdgpu_ib_get(adev, NULL, 256,
269					AMDGPU_IB_POOL_DIRECT, &ib);
270	if (r)
271		goto err0;
272
273	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1);
274	ib.ptr[1] = lower_32_bits(gpu_addr);
275	ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
276	ib.ptr[3] = 0xDEADBEEF;
277	ib.length_dw = 4;
278	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
279	if (r)
280		goto err1;
281
282	r = dma_fence_wait_timeout(f, false, timeout);
283	if (r == 0) {
284		r = -ETIMEDOUT;
285		goto err1;
286	} else if (r < 0) {
287		goto err1;
288	}
289	tmp = le32_to_cpu(adev->wb.wb[index]);
290	if (tmp == 0xDEADBEEF)
291		r = 0;
292	else
293		r = -EINVAL;
294
295err1:
296	amdgpu_ib_free(adev, &ib, NULL);
297	dma_fence_put(f);
298err0:
299	amdgpu_device_wb_free(adev, index);
300	return r;
301}
302
303/**
304 * si_dma_vm_copy_pte - update PTEs by copying them from the GART
305 *
306 * @ib: indirect buffer to fill with commands
307 * @pe: addr of the page entry
308 * @src: src addr to copy from
309 * @count: number of page entries to update
310 *
311 * Update PTEs by copying them from the GART using DMA (SI).
312 */
313static void si_dma_vm_copy_pte(struct amdgpu_ib *ib,
314			       uint64_t pe, uint64_t src,
315			       unsigned count)
316{
317	unsigned bytes = count * 8;
318
319	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
320					      1, 0, 0, bytes);
321	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
322	ib->ptr[ib->length_dw++] = lower_32_bits(src);
323	ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
324	ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
325}
326
327/**
328 * si_dma_vm_write_pte - update PTEs by writing them manually
329 *
330 * @ib: indirect buffer to fill with commands
331 * @pe: addr of the page entry
332 * @value: dst addr to write into pe
333 * @count: number of page entries to update
334 * @incr: increase next addr by incr bytes
335 *
336 * Update PTEs by writing them manually using DMA (SI).
337 */
338static void si_dma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
339				uint64_t value, unsigned count,
340				uint32_t incr)
341{
342	unsigned ndw = count * 2;
343
344	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
345	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
346	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
347	for (; ndw > 0; ndw -= 2) {
348		ib->ptr[ib->length_dw++] = lower_32_bits(value);
349		ib->ptr[ib->length_dw++] = upper_32_bits(value);
350		value += incr;
351	}
352}
353
354/**
355 * si_dma_vm_set_pte_pde - update the page tables using sDMA
356 *
357 * @ib: indirect buffer to fill with commands
358 * @pe: addr of the page entry
359 * @addr: dst addr to write into pe
360 * @count: number of page entries to update
361 * @incr: increase next addr by incr bytes
362 * @flags: access flags
363 *
364 * Update the page tables using sDMA (SI).
365 */
366static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib,
367				     uint64_t pe,
368				     uint64_t addr, unsigned count,
369				     uint32_t incr, uint64_t flags)
370{
371	uint64_t value;
372	unsigned ndw;
373
374	while (count) {
375		ndw = count * 2;
376		if (ndw > 0xFFFFE)
377			ndw = 0xFFFFE;
378
379		if (flags & AMDGPU_PTE_VALID)
380			value = addr;
381		else
382			value = 0;
383
384		/* for physically contiguous pages (vram) */
385		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
386		ib->ptr[ib->length_dw++] = pe; /* dst addr */
387		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
388		ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
389		ib->ptr[ib->length_dw++] = upper_32_bits(flags);
390		ib->ptr[ib->length_dw++] = value; /* value */
391		ib->ptr[ib->length_dw++] = upper_32_bits(value);
392		ib->ptr[ib->length_dw++] = incr; /* increment size */
393		ib->ptr[ib->length_dw++] = 0;
394		pe += ndw * 4;
395		addr += (ndw / 2) * incr;
396		count -= ndw / 2;
397	}
398}
399
400/**
401 * si_dma_ring_pad_ib - pad the IB to the required number of dw
402 *
403 * @ring: amdgpu_ring pointer
404 * @ib: indirect buffer to fill with padding
405 *
406 */
407static void si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
408{
409	while (ib->length_dw & 0x7)
410		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
411}
412
413/**
414 * si_dma_ring_emit_pipeline_sync - sync the pipeline
415 *
416 * @ring: amdgpu_ring pointer
417 *
418 * Make sure all previous operations are completed (SI).
419 */
420static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
421{
422	uint32_t seq = ring->fence_drv.sync_seq;
423	uint64_t addr = ring->fence_drv.gpu_addr;
424
425	/* wait for idle */
426	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0) |
427			  (1 << 27)); /* Poll memory */
428	amdgpu_ring_write(ring, lower_32_bits(addr));
429	amdgpu_ring_write(ring, (0xff << 16) | upper_32_bits(addr)); /* retry, addr_hi */
430	amdgpu_ring_write(ring, 0xffffffff); /* mask */
431	amdgpu_ring_write(ring, seq); /* value */
432	amdgpu_ring_write(ring, (3 << 28) | 0x20); /* func(equal) | poll interval */
433}
434
435/**
436 * si_dma_ring_emit_vm_flush - si vm flush using sDMA
437 *
438 * @ring: amdgpu_ring pointer
439 * @vmid: vmid number to use
440 * @pd_addr: page directory address
441 *
442 * Update the page table base and flush the VM TLB
443 * using sDMA (SI).
444 */
445static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
446				      unsigned vmid, uint64_t pd_addr)
447{
448	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
449
450	/* wait for invalidate to complete */
451	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
452	amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
453	amdgpu_ring_write(ring, 0xff << 16); /* retry */
454	amdgpu_ring_write(ring, 1 << vmid); /* mask */
455	amdgpu_ring_write(ring, 0); /* value */
456	amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
457}
458
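/**
 * si_dma_ring_emit_wreg - emit a register write on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @reg: register offset to write
 * @val: value to write
 *
 * Emit an SRBM_WRITE packet that writes @val to @reg (SI).
 */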
459static void si_dma_ring_emit_wreg(struct amdgpu_ring *ring,
460				  uint32_t reg, uint32_t val)
461{
462	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
463	amdgpu_ring_write(ring, (0xf << 16) | reg);
464	amdgpu_ring_write(ring, val);
465}
466
467static int si_dma_early_init(void *handle)
468{
469	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
470
471	adev->sdma.num_instances = 2;
472
473	si_dma_set_ring_funcs(adev);
474	si_dma_set_buffer_funcs(adev);
475	si_dma_set_vm_pte_funcs(adev);
476	si_dma_set_irq_funcs(adev);
477
478	return 0;
479}
480
481static int si_dma_sw_init(void *handle)
482{
483	struct amdgpu_ring *ring;
484	int r, i;
485	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
486
487	/* DMA0 trap event */
488	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224,
489			      &adev->sdma.trap_irq);
490	if (r)
491		return r;
492
493	/* DMA1 trap event */
494	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 244,
495			      &adev->sdma.trap_irq);
496	if (r)
497		return r;
498
499	for (i = 0; i < adev->sdma.num_instances; i++) {
500		ring = &adev->sdma.instance[i].ring;
501		ring->ring_obj = NULL;
502		ring->use_doorbell = false;
503		sprintf(ring->name, "sdma%d", i);
504		r = amdgpu_ring_init(adev, ring, 1024,
505				     &adev->sdma.trap_irq,
506				     (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
507				     AMDGPU_SDMA_IRQ_INSTANCE1,
508				     AMDGPU_RING_PRIO_DEFAULT, NULL);
509		if (r)
510			return r;
511	}
512
513	return r;
514}
515
516static int si_dma_sw_fini(void *handle)
517{
518	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
519	int i;
520
521	for (i = 0; i < adev->sdma.num_instances; i++)
522		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
523
524	return 0;
525}
526
527static int si_dma_hw_init(void *handle)
528{
529	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
530
531	return si_dma_start(adev);
532}
533
534static int si_dma_hw_fini(void *handle)
535{
536	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
537
538	si_dma_stop(adev);
539
540	return 0;
541}
542
543static int si_dma_suspend(void *handle)
544{
545	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
546
547	return si_dma_hw_fini(adev);
548}
549
550static int si_dma_resume(void *handle)
551{
552	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
553
554	return si_dma_hw_init(adev);
555}
556
557static bool si_dma_is_idle(void *handle)
558{
559	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
560	u32 tmp = RREG32(SRBM_STATUS2);
561
562	if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK))
563	    return false;
564
565	return true;
566}
567
568static int si_dma_wait_for_idle(void *handle)
569{
570	unsigned i;
571	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
572
573	for (i = 0; i < adev->usec_timeout; i++) {
574		if (si_dma_is_idle(handle))
575			return 0;
576		udelay(1);
577	}
578	return -ETIMEDOUT;
579}
580
581static int si_dma_soft_reset(void *handle)
582{
583	DRM_INFO("si_dma_soft_reset --- not implemented !!!!!!!\n");
584	return 0;
585}
586
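/**
 * si_dma_set_trap_irq_state - enable/disable DMA trap interrupts
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source
 * @type: DMA instance the state applies to
 * @state: interrupt state (enable or disable)
 *
 * Toggle TRAP_ENABLE in DMA_CNTL for the selected instance (SI).
 */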
587static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
588					struct amdgpu_irq_src *src,
589					unsigned type,
590					enum amdgpu_interrupt_state state)
591{
592	u32 sdma_cntl;
593
594	switch (type) {
595	case AMDGPU_SDMA_IRQ_INSTANCE0:
596		switch (state) {
597		case AMDGPU_IRQ_STATE_DISABLE:
598			sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
599			sdma_cntl &= ~TRAP_ENABLE;
600			WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
601			break;
602		case AMDGPU_IRQ_STATE_ENABLE:
603			sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
604			sdma_cntl |= TRAP_ENABLE;
605			WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
606			break;
607		default:
608			break;
609		}
610		break;
611	case AMDGPU_SDMA_IRQ_INSTANCE1:
612		switch (state) {
613		case AMDGPU_IRQ_STATE_DISABLE:
614			sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
615			sdma_cntl &= ~TRAP_ENABLE;
616			WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
617			break;
618		case AMDGPU_IRQ_STATE_ENABLE:
619			sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
620			sdma_cntl |= TRAP_ENABLE;
621			WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
622			break;
623		default:
624			break;
625		}
626		break;
627	default:
628		break;
629	}
630	return 0;
631}
632
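/**
 * si_dma_process_trap_irq - process a DMA trap interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: decoded interrupt vector entry
 *
 * Run fence processing on the DMA instance that raised the trap
 * (src_id 224 is DMA0, otherwise DMA1).
 */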
633static int si_dma_process_trap_irq(struct amdgpu_device *adev,
634				      struct amdgpu_irq_src *source,
635				      struct amdgpu_iv_entry *entry)
636{
637	if (entry->src_id == 224)
638		amdgpu_fence_process(&adev->sdma.instance[0].ring);
639	else
640		amdgpu_fence_process(&adev->sdma.instance[1].ring);
641	return 0;
642}
643
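/**
 * si_dma_set_clockgating_state - set DMA clockgating state
 *
 * @handle: amdgpu_device pointer
 * @state: clockgating state to set
 *
 * When gating is requested and SDMA MGCG is supported, clear
 * MEM_POWER_OVERRIDE and program DMA_CLK_CTRL to 0x100 for each
 * instance; otherwise set MEM_POWER_OVERRIDE and program DMA_CLK_CTRL
 * to 0xff000000 (SI).
 */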
644static int si_dma_set_clockgating_state(void *handle,
645					  enum amd_clockgating_state state)
646{
647	u32 orig, data, offset;
648	int i;
649	bool enable;
650	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
651
652	enable = (state == AMD_CG_STATE_GATE);
653
654	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
655		for (i = 0; i < adev->sdma.num_instances; i++) {
656			if (i == 0)
657				offset = DMA0_REGISTER_OFFSET;
658			else
659				offset = DMA1_REGISTER_OFFSET;
660			orig = data = RREG32(DMA_POWER_CNTL + offset);
661			data &= ~MEM_POWER_OVERRIDE;
662			if (data != orig)
663				WREG32(DMA_POWER_CNTL + offset, data);
664			WREG32(DMA_CLK_CTRL + offset, 0x00000100);
665		}
666	} else {
667		for (i = 0; i < adev->sdma.num_instances; i++) {
668			if (i == 0)
669				offset = DMA0_REGISTER_OFFSET;
670			else
671				offset = DMA1_REGISTER_OFFSET;
672			orig = data = RREG32(DMA_POWER_CNTL + offset);
673			data |= MEM_POWER_OVERRIDE;
674			if (data != orig)
675				WREG32(DMA_POWER_CNTL + offset, data);
676
677			orig = data = RREG32(DMA_CLK_CTRL + offset);
678			data = 0xff000000;
679			if (data != orig)
680				WREG32(DMA_CLK_CTRL + offset, data);
681		}
682	}
683
684	return 0;
685}
686
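/**
 * si_dma_set_powergating_state - set DMA powergating state
 *
 * @handle: amdgpu_device pointer
 * @state: powergating state (not used by this implementation)
 *
 * Writes a fixed sequence to the DMA PGFSM registers; the register
 * values are hardware-specific magic numbers.
 */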
687static int si_dma_set_powergating_state(void *handle,
688					  enum amd_powergating_state state)
689{
690	u32 tmp;
691
692	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
693
694	WREG32(DMA_PGFSM_WRITE,  0x00002000);
695	WREG32(DMA_PGFSM_CONFIG, 0x100010ff);
696
697	for (tmp = 0; tmp < 5; tmp++)
698		WREG32(DMA_PGFSM_WRITE, 0);
699
700	return 0;
701}
702
703static const struct amd_ip_funcs si_dma_ip_funcs = {
704	.name = "si_dma",
705	.early_init = si_dma_early_init,
706	.late_init = NULL,
707	.sw_init = si_dma_sw_init,
708	.sw_fini = si_dma_sw_fini,
709	.hw_init = si_dma_hw_init,
710	.hw_fini = si_dma_hw_fini,
711	.suspend = si_dma_suspend,
712	.resume = si_dma_resume,
713	.is_idle = si_dma_is_idle,
714	.wait_for_idle = si_dma_wait_for_idle,
715	.soft_reset = si_dma_soft_reset,
716	.set_clockgating_state = si_dma_set_clockgating_state,
717	.set_powergating_state = si_dma_set_powergating_state,
718};
719
720static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
721	.type = AMDGPU_RING_TYPE_SDMA,
722	.align_mask = 0xf,
723	.nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0),
724	.support_64bit_ptrs = false,
725	.get_rptr = si_dma_ring_get_rptr,
726	.get_wptr = si_dma_ring_get_wptr,
727	.set_wptr = si_dma_ring_set_wptr,
728	.emit_frame_size =
729		3 + 3 + /* hdp flush / invalidate */
730		6 + /* si_dma_ring_emit_pipeline_sync */
731		SI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* si_dma_ring_emit_vm_flush */
732		9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */
733	.emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */
734	.emit_ib = si_dma_ring_emit_ib,
735	.emit_fence = si_dma_ring_emit_fence,
736	.emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
737	.emit_vm_flush = si_dma_ring_emit_vm_flush,
738	.test_ring = si_dma_ring_test_ring,
739	.test_ib = si_dma_ring_test_ib,
740	.insert_nop = amdgpu_ring_insert_nop,
741	.pad_ib = si_dma_ring_pad_ib,
742	.emit_wreg = si_dma_ring_emit_wreg,
743};
744
745static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
746{
747	int i;
748
749	for (i = 0; i < adev->sdma.num_instances; i++)
750		adev->sdma.instance[i].ring.funcs = &si_dma_ring_funcs;
751}
752
753static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = {
754	.set = si_dma_set_trap_irq_state,
755	.process = si_dma_process_trap_irq,
756};
757
 
 
 
 
 
 
 
 
 
758static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
759{
760	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
761	adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs;
762}
763
764/**
765 * si_dma_emit_copy_buffer - copy buffer using the sDMA engine
766 *
767 * @ib: indirect buffer to copy to
768 * @src_offset: src GPU address
769 * @dst_offset: dst GPU address
770 * @byte_count: number of bytes to xfer
771 * @tmz: is this a secure operation
772 *
773 * Copy GPU buffers using the DMA engine (SI).
774 * Used by the amdgpu ttm implementation to move pages if
775 * registered as the asic copy callback.
776 */
777static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib,
778				       uint64_t src_offset,
779				       uint64_t dst_offset,
780				       uint32_t byte_count,
781				       bool tmz)
782{
783	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
784					      1, 0, 0, byte_count);
785	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
786	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
787	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) & 0xff;
788	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset) & 0xff;
789}
790
791/**
792 * si_dma_emit_fill_buffer - fill buffer using the sDMA engine
793 *
794 * @ib: indirect buffer to fill
795 * @src_data: value to write to buffer
796 * @dst_offset: dst GPU address
797 * @byte_count: number of bytes to xfer
798 *
799 * Fill GPU buffers using the DMA engine (SI).
800 */
801static void si_dma_emit_fill_buffer(struct amdgpu_ib *ib,
802				       uint32_t src_data,
803				       uint64_t dst_offset,
804				       uint32_t byte_count)
805{
806	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_CONSTANT_FILL,
807					      0, 0, 0, byte_count / 4);
808	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
809	ib->ptr[ib->length_dw++] = src_data;
810	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) << 16;
811}
812
813
814static const struct amdgpu_buffer_funcs si_dma_buffer_funcs = {
815	.copy_max_bytes = 0xffff8,
816	.copy_num_dw = 5,
817	.emit_copy_buffer = si_dma_emit_copy_buffer,
818
819	.fill_max_bytes = 0xffff8,
820	.fill_num_dw = 4,
821	.emit_fill_buffer = si_dma_emit_fill_buffer,
822};
823
824static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
825{
826	adev->mman.buffer_funcs = &si_dma_buffer_funcs;
827	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
828}
829
830static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
831	.copy_pte_num_dw = 5,
832	.copy_pte = si_dma_vm_copy_pte,
833
834	.write_pte = si_dma_vm_write_pte,
835	.set_pte_pde = si_dma_vm_set_pte_pde,
836};
837
838static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
839{
840	unsigned i;
841
842	adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
843	for (i = 0; i < adev->sdma.num_instances; i++) {
844		adev->vm_manager.vm_pte_scheds[i] =
845			&adev->sdma.instance[i].ring.sched;
846	}
847	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
848}
849
850const struct amdgpu_ip_block_version si_dma_ip_block =
851{
852	.type = AMD_IP_BLOCK_TYPE_SDMA,
853	.major = 1,
854	.minor = 0,
855	.rev = 0,
856	.funcs = &si_dma_ip_funcs,
857};
v4.17
  1/*
  2 * Copyright 2015 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 * Authors: Alex Deucher
 23 */
 24#include <drm/drmP.h>
 25#include "amdgpu.h"
 26#include "amdgpu_trace.h"
 27#include "si.h"
 28#include "sid.h"
 29
 30const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
 31{
 32	DMA0_REGISTER_OFFSET,
 33	DMA1_REGISTER_OFFSET
 34};
 35
 36static void si_dma_set_ring_funcs(struct amdgpu_device *adev);
 37static void si_dma_set_buffer_funcs(struct amdgpu_device *adev);
 38static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
 39static void si_dma_set_irq_funcs(struct amdgpu_device *adev);
 40
 41static uint64_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
 42{
 43	return ring->adev->wb.wb[ring->rptr_offs>>2];
 44}
 45
 46static uint64_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
 47{
 48	struct amdgpu_device *adev = ring->adev;
 49	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
 50
 51	return (RREG32(DMA_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
 52}
 53
 54static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
 55{
 56	struct amdgpu_device *adev = ring->adev;
 57	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
 58
 59	WREG32(DMA_RB_WPTR + sdma_offsets[me],
 60	       (lower_32_bits(ring->wptr) << 2) & 0x3fffc);
 61}
 62
 63static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
 64				struct amdgpu_ib *ib,
 65				unsigned vmid, bool ctx_switch)
 66{
 67	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
 68	 * Pad as necessary with NOPs.
 69	 */
 70	while ((lower_32_bits(ring->wptr) & 7) != 5)
 71		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
 72	amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vmid, 0));
 73	amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
 74	amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
 75
 76}
 77
 78/**
 79 * si_dma_ring_emit_fence - emit a fence on the DMA ring
 80 *
 81 * @ring: amdgpu ring pointer
 82 * @addr: address the fence value is written to
 * @seq: fence sequence number
 * @flags: fence related flags
 83 *
 84 * Add a DMA fence packet to the ring to write
 85 * the fence seq number, followed by a DMA trap packet to generate
 86 * an interrupt if needed (SI).
 87 */
 88static void si_dma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
 89				      unsigned flags)
 90{
 91
 92	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
 93	/* write the fence */
 94	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
 95	amdgpu_ring_write(ring, addr & 0xfffffffc);
 96	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
 97	amdgpu_ring_write(ring, seq);
 98	/* optionally write high bits as well */
 99	if (write64bit) {
100		addr += 4;
101		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
102		amdgpu_ring_write(ring, addr & 0xfffffffc);
103		amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
104		amdgpu_ring_write(ring, upper_32_bits(seq));
105	}
106	/* generate an interrupt */
107	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0, 0));
108}
109
110static void si_dma_stop(struct amdgpu_device *adev)
111{
112	struct amdgpu_ring *ring;
113	u32 rb_cntl;
114	unsigned i;
115
116	for (i = 0; i < adev->sdma.num_instances; i++) {
117		ring = &adev->sdma.instance[i].ring;
118		/* dma0 */
119		rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
120		rb_cntl &= ~DMA_RB_ENABLE;
121		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
122
123		if (adev->mman.buffer_funcs_ring == ring)
124			amdgpu_ttm_set_buffer_funcs_status(adev, false);
125		ring->ready = false;
126	}
127}
128
129static int si_dma_start(struct amdgpu_device *adev)
130{
131	struct amdgpu_ring *ring;
132	u32 rb_cntl, dma_cntl, ib_cntl, rb_bufsz;
133	int i, r;
134	uint64_t rptr_addr;
135
136	for (i = 0; i < adev->sdma.num_instances; i++) {
137		ring = &adev->sdma.instance[i].ring;
138
139		WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
140		WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
141
142		/* Set ring buffer size in dwords */
143		rb_bufsz = order_base_2(ring->ring_size / 4);
144		rb_cntl = rb_bufsz << 1;
145#ifdef __BIG_ENDIAN
146		rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
147#endif
148		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
149
150		/* Initialize the ring buffer's read and write pointers */
151		WREG32(DMA_RB_RPTR + sdma_offsets[i], 0);
152		WREG32(DMA_RB_WPTR + sdma_offsets[i], 0);
153
154		rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
155
156		WREG32(DMA_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
157		WREG32(DMA_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);
158
159		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
160
161		WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
162
163		/* enable DMA IBs */
164		ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
165#ifdef __BIG_ENDIAN
166		ib_cntl |= DMA_IB_SWAP_ENABLE;
167#endif
168		WREG32(DMA_IB_CNTL + sdma_offsets[i], ib_cntl);
169
170		dma_cntl = RREG32(DMA_CNTL + sdma_offsets[i]);
171		dma_cntl &= ~CTXEMPTY_INT_ENABLE;
172		WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl);
173
174		ring->wptr = 0;
175		WREG32(DMA_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
176		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
177
178		ring->ready = true;
179
180		r = amdgpu_ring_test_ring(ring);
181		if (r) {
182			ring->ready = false;
183			return r;
184		}
185
186		if (adev->mman.buffer_funcs_ring == ring)
187			amdgpu_ttm_set_buffer_funcs_status(adev, true);
188	}
189
190	return 0;
191}
192
193/**
194 * si_dma_ring_test_ring - simple async dma engine test
195 *
196 * @ring: amdgpu_ring structure holding ring information
197 *
198 * Test the DMA engine by using it to write a
199 * value to memory (SI).
200 * Returns 0 for success, error for failure.
201 */
202static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
203{
204	struct amdgpu_device *adev = ring->adev;
205	unsigned i;
206	unsigned index;
207	int r;
208	u32 tmp;
209	u64 gpu_addr;
210
211	r = amdgpu_device_wb_get(adev, &index);
212	if (r) {
213		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
214		return r;
215	}
216
217	gpu_addr = adev->wb.gpu_addr + (index * 4);
218	tmp = 0xCAFEDEAD;
219	adev->wb.wb[index] = cpu_to_le32(tmp);
220
221	r = amdgpu_ring_alloc(ring, 4);
222	if (r) {
223		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
224		amdgpu_device_wb_free(adev, index);
225		return r;
226	}
227
228	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1));
229	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
230	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
231	amdgpu_ring_write(ring, 0xDEADBEEF);
232	amdgpu_ring_commit(ring);
233
234	for (i = 0; i < adev->usec_timeout; i++) {
235		tmp = le32_to_cpu(adev->wb.wb[index]);
236		if (tmp == 0xDEADBEEF)
237			break;
238		DRM_UDELAY(1);
239	}
240
241	if (i < adev->usec_timeout) {
242		DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
243	} else {
244		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
245			  ring->idx, tmp);
246		r = -EINVAL;
247	}
248	amdgpu_device_wb_free(adev, index);
249
250	return r;
251}
252
253/**
254 * si_dma_ring_test_ib - test an IB on the DMA engine
255 *
256 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
257 *
258 * Test a simple IB in the DMA ring (SI).
259 * Returns 0 on success, error on failure.
260 */
261static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
262{
263	struct amdgpu_device *adev = ring->adev;
264	struct amdgpu_ib ib;
265	struct dma_fence *f = NULL;
266	unsigned index;
267	u32 tmp = 0;
268	u64 gpu_addr;
269	long r;
270
271	r = amdgpu_device_wb_get(adev, &index);
272	if (r) {
273		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
274		return r;
275	}
276
277	gpu_addr = adev->wb.gpu_addr + (index * 4);
278	tmp = 0xCAFEDEAD;
279	adev->wb.wb[index] = cpu_to_le32(tmp);
280	memset(&ib, 0, sizeof(ib));
281	r = amdgpu_ib_get(adev, NULL, 256, &ib);
282	if (r) {
283		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
284		goto err0;
285	}
286
287	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1);
288	ib.ptr[1] = lower_32_bits(gpu_addr);
289	ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
290	ib.ptr[3] = 0xDEADBEEF;
291	ib.length_dw = 4;
292	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
293	if (r)
294		goto err1;
295
296	r = dma_fence_wait_timeout(f, false, timeout);
297	if (r == 0) {
298		DRM_ERROR("amdgpu: IB test timed out\n");
299		r = -ETIMEDOUT;
300		goto err1;
301	} else if (r < 0) {
302		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
303		goto err1;
304	}
305	tmp = le32_to_cpu(adev->wb.wb[index]);
306	if (tmp == 0xDEADBEEF) {
307		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
308		r = 0;
309	} else {
310		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
311		r = -EINVAL;
312	}
313
314err1:
315	amdgpu_ib_free(adev, &ib, NULL);
316	dma_fence_put(f);
317err0:
318	amdgpu_device_wb_free(adev, index);
319	return r;
320}
321
322/**
323 * si_dma_vm_copy_pte - update PTEs by copying them from the GART
324 *
325 * @ib: indirect buffer to fill with commands
326 * @pe: addr of the page entry
327 * @src: src addr to copy from
328 * @count: number of page entries to update
329 *
330 * Update PTEs by copying them from the GART using DMA (SI).
331 */
332static void si_dma_vm_copy_pte(struct amdgpu_ib *ib,
333			       uint64_t pe, uint64_t src,
334			       unsigned count)
335{
336	unsigned bytes = count * 8;
337
338	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
339					      1, 0, 0, bytes);
340	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
341	ib->ptr[ib->length_dw++] = lower_32_bits(src);
342	ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
343	ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
344}
345
346/**
347 * si_dma_vm_write_pte - update PTEs by writing them manually
348 *
349 * @ib: indirect buffer to fill with commands
350 * @pe: addr of the page entry
351 * @value: dst addr to write into pe
352 * @count: number of page entries to update
353 * @incr: increase next addr by incr bytes
354 *
355 * Update PTEs by writing them manually using DMA (SI).
356 */
357static void si_dma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
358				uint64_t value, unsigned count,
359				uint32_t incr)
360{
361	unsigned ndw = count * 2;
362
363	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
364	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
365	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
366	for (; ndw > 0; ndw -= 2) {
367		ib->ptr[ib->length_dw++] = lower_32_bits(value);
368		ib->ptr[ib->length_dw++] = upper_32_bits(value);
369		value += incr;
370	}
371}
372
373/**
374 * si_dma_vm_set_pte_pde - update the page tables using sDMA
375 *
376 * @ib: indirect buffer to fill with commands
377 * @pe: addr of the page entry
378 * @addr: dst addr to write into pe
379 * @count: number of page entries to update
380 * @incr: increase next addr by incr bytes
381 * @flags: access flags
382 *
383 * Update the page tables using sDMA (SI).
384 */
385static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib,
386				     uint64_t pe,
387				     uint64_t addr, unsigned count,
388				     uint32_t incr, uint64_t flags)
389{
390	uint64_t value;
391	unsigned ndw;
392
393	while (count) {
394		ndw = count * 2;
395		if (ndw > 0xFFFFE)
396			ndw = 0xFFFFE;
397
398		if (flags & AMDGPU_PTE_VALID)
399			value = addr;
400		else
401			value = 0;
402
403		/* for physically contiguous pages (vram) */
404		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
405		ib->ptr[ib->length_dw++] = pe; /* dst addr */
406		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
407		ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
408		ib->ptr[ib->length_dw++] = upper_32_bits(flags);
409		ib->ptr[ib->length_dw++] = value; /* value */
410		ib->ptr[ib->length_dw++] = upper_32_bits(value);
411		ib->ptr[ib->length_dw++] = incr; /* increment size */
412		ib->ptr[ib->length_dw++] = 0;
413		pe += ndw * 4;
414		addr += (ndw / 2) * incr;
415		count -= ndw / 2;
416	}
417}
418
419/**
420 * si_dma_ring_pad_ib - pad the IB to the required number of dw
421 *
 * @ring: amdgpu_ring pointer
422 * @ib: indirect buffer to fill with padding
423 *
424 */
425static void si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
426{
427	while (ib->length_dw & 0x7)
428		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
429}
430
431/**
432 * si_dma_ring_emit_pipeline_sync - sync the pipeline
433 *
434 * @ring: amdgpu_ring pointer
435 *
436 * Make sure all previous operations are completed (SI).
437 */
438static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
439{
440	uint32_t seq = ring->fence_drv.sync_seq;
441	uint64_t addr = ring->fence_drv.gpu_addr;
442
443	/* wait for idle */
444	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0) |
445			  (1 << 27)); /* Poll memory */
446	amdgpu_ring_write(ring, lower_32_bits(addr));
447	amdgpu_ring_write(ring, (0xff << 16) | upper_32_bits(addr)); /* retry, addr_hi */
448	amdgpu_ring_write(ring, 0xffffffff); /* mask */
449	amdgpu_ring_write(ring, seq); /* value */
450	amdgpu_ring_write(ring, (3 << 28) | 0x20); /* func(equal) | poll interval */
451}
452
453/**
454 * si_dma_ring_emit_vm_flush - si vm flush using sDMA
455 *
456 * @ring: amdgpu_ring pointer
457 * @vmid: vmid number to use
 * @pd_addr: page directory address
458 *
459 * Update the page table base and flush the VM TLB
460 * using sDMA (SI).
461 */
462static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
463				      unsigned vmid, uint64_t pd_addr)
464{
465	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
466
467	/* wait for invalidate to complete */
468	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
469	amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
470	amdgpu_ring_write(ring, 0xff << 16); /* retry */
471	amdgpu_ring_write(ring, 1 << vmid); /* mask */
472	amdgpu_ring_write(ring, 0); /* value */
473	amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
474}
475
476static void si_dma_ring_emit_wreg(struct amdgpu_ring *ring,
477				  uint32_t reg, uint32_t val)
478{
479	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
480	amdgpu_ring_write(ring, (0xf << 16) | reg);
481	amdgpu_ring_write(ring, val);
482}
483
484static int si_dma_early_init(void *handle)
485{
486	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
487
488	adev->sdma.num_instances = 2;
489
490	si_dma_set_ring_funcs(adev);
491	si_dma_set_buffer_funcs(adev);
492	si_dma_set_vm_pte_funcs(adev);
493	si_dma_set_irq_funcs(adev);
494
495	return 0;
496}
497
498static int si_dma_sw_init(void *handle)
499{
500	struct amdgpu_ring *ring;
501	int r, i;
502	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
503
504	/* DMA0 trap event */
505	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224, &adev->sdma.trap_irq);
506	if (r)
507		return r;
508
509	/* DMA1 trap event */
510	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 244, &adev->sdma.trap_irq_1);
511	if (r)
512		return r;
513
514	for (i = 0; i < adev->sdma.num_instances; i++) {
515		ring = &adev->sdma.instance[i].ring;
516		ring->ring_obj = NULL;
517		ring->use_doorbell = false;
518		sprintf(ring->name, "sdma%d", i);
519		r = amdgpu_ring_init(adev, ring, 1024,
520				     &adev->sdma.trap_irq,
521				     (i == 0) ?
522				     AMDGPU_SDMA_IRQ_TRAP0 :
523				     AMDGPU_SDMA_IRQ_TRAP1);
524		if (r)
525			return r;
526	}
527
528	return r;
529}
530
531static int si_dma_sw_fini(void *handle)
532{
533	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
534	int i;
535
536	for (i = 0; i < adev->sdma.num_instances; i++)
537		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
538
539	return 0;
540}
541
542static int si_dma_hw_init(void *handle)
543{
544	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
545
546	return si_dma_start(adev);
547}
548
549static int si_dma_hw_fini(void *handle)
550{
551	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
552
553	si_dma_stop(adev);
554
555	return 0;
556}
557
558static int si_dma_suspend(void *handle)
559{
560	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
561
562	return si_dma_hw_fini(adev);
563}
564
565static int si_dma_resume(void *handle)
566{
567	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
568
569	return si_dma_hw_init(adev);
570}
571
572static bool si_dma_is_idle(void *handle)
573{
574	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
575	u32 tmp = RREG32(SRBM_STATUS2);
576
577	if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK))
578	    return false;
579
580	return true;
581}
582
583static int si_dma_wait_for_idle(void *handle)
584{
585	unsigned i;
586	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
587
588	for (i = 0; i < adev->usec_timeout; i++) {
589		if (si_dma_is_idle(handle))
590			return 0;
591		udelay(1);
592	}
593	return -ETIMEDOUT;
594}
595
596static int si_dma_soft_reset(void *handle)
597{
598	DRM_INFO("si_dma_soft_reset --- not implemented !!!!!!!\n");
599	return 0;
600}
601
602static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
603					struct amdgpu_irq_src *src,
604					unsigned type,
605					enum amdgpu_interrupt_state state)
606{
607	u32 sdma_cntl;
608
609	switch (type) {
610	case AMDGPU_SDMA_IRQ_TRAP0:
611		switch (state) {
612		case AMDGPU_IRQ_STATE_DISABLE:
613			sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
614			sdma_cntl &= ~TRAP_ENABLE;
615			WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
616			break;
617		case AMDGPU_IRQ_STATE_ENABLE:
618			sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
619			sdma_cntl |= TRAP_ENABLE;
620			WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
621			break;
622		default:
623			break;
624		}
625		break;
626	case AMDGPU_SDMA_IRQ_TRAP1:
627		switch (state) {
628		case AMDGPU_IRQ_STATE_DISABLE:
629			sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
630			sdma_cntl &= ~TRAP_ENABLE;
631			WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
632			break;
633		case AMDGPU_IRQ_STATE_ENABLE:
634			sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
635			sdma_cntl |= TRAP_ENABLE;
636			WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
637			break;
638		default:
639			break;
640		}
641		break;
642	default:
643		break;
644	}
645	return 0;
646}
647
648static int si_dma_process_trap_irq(struct amdgpu_device *adev,
649				      struct amdgpu_irq_src *source,
650				      struct amdgpu_iv_entry *entry)
651{
652	amdgpu_fence_process(&adev->sdma.instance[0].ring);
653
654	return 0;
655}
656
657static int si_dma_process_trap_irq_1(struct amdgpu_device *adev,
658				      struct amdgpu_irq_src *source,
659				      struct amdgpu_iv_entry *entry)
660{
661	amdgpu_fence_process(&adev->sdma.instance[1].ring);
662
663	return 0;
664}
665
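/**
 * si_dma_process_illegal_inst_irq - process a DMA illegal instruction interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: decoded interrupt vector entry
 *
 * Log the illegal instruction and schedule the GPU reset work.
 */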
666static int si_dma_process_illegal_inst_irq(struct amdgpu_device *adev,
667					      struct amdgpu_irq_src *source,
668					      struct amdgpu_iv_entry *entry)
669{
670	DRM_ERROR("Illegal instruction in SDMA command stream\n");
671	schedule_work(&adev->reset_work);
672	return 0;
673}
674
675static int si_dma_set_clockgating_state(void *handle,
676					  enum amd_clockgating_state state)
677{
678	u32 orig, data, offset;
679	int i;
680	bool enable;
681	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
682
683	enable = (state == AMD_CG_STATE_GATE) ? true : false;
684
685	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
686		for (i = 0; i < adev->sdma.num_instances; i++) {
687			if (i == 0)
688				offset = DMA0_REGISTER_OFFSET;
689			else
690				offset = DMA1_REGISTER_OFFSET;
691			orig = data = RREG32(DMA_POWER_CNTL + offset);
692			data &= ~MEM_POWER_OVERRIDE;
693			if (data != orig)
694				WREG32(DMA_POWER_CNTL + offset, data);
695			WREG32(DMA_CLK_CTRL + offset, 0x00000100);
696		}
697	} else {
698		for (i = 0; i < adev->sdma.num_instances; i++) {
699			if (i == 0)
700				offset = DMA0_REGISTER_OFFSET;
701			else
702				offset = DMA1_REGISTER_OFFSET;
703			orig = data = RREG32(DMA_POWER_CNTL + offset);
704			data |= MEM_POWER_OVERRIDE;
705			if (data != orig)
706				WREG32(DMA_POWER_CNTL + offset, data);
707
708			orig = data = RREG32(DMA_CLK_CTRL + offset);
709			data = 0xff000000;
710			if (data != orig)
711				WREG32(DMA_CLK_CTRL + offset, data);
712		}
713	}
714
715	return 0;
716}
717
718static int si_dma_set_powergating_state(void *handle,
719					  enum amd_powergating_state state)
720{
721	u32 tmp;
722
723	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
724
725	WREG32(DMA_PGFSM_WRITE,  0x00002000);
726	WREG32(DMA_PGFSM_CONFIG, 0x100010ff);
727
728	for (tmp = 0; tmp < 5; tmp++)
729		WREG32(DMA_PGFSM_WRITE, 0);
730
731	return 0;
732}
733
734static const struct amd_ip_funcs si_dma_ip_funcs = {
735	.name = "si_dma",
736	.early_init = si_dma_early_init,
737	.late_init = NULL,
738	.sw_init = si_dma_sw_init,
739	.sw_fini = si_dma_sw_fini,
740	.hw_init = si_dma_hw_init,
741	.hw_fini = si_dma_hw_fini,
742	.suspend = si_dma_suspend,
743	.resume = si_dma_resume,
744	.is_idle = si_dma_is_idle,
745	.wait_for_idle = si_dma_wait_for_idle,
746	.soft_reset = si_dma_soft_reset,
747	.set_clockgating_state = si_dma_set_clockgating_state,
748	.set_powergating_state = si_dma_set_powergating_state,
749};
750
751static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
752	.type = AMDGPU_RING_TYPE_SDMA,
753	.align_mask = 0xf,
754	.nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0),
755	.support_64bit_ptrs = false,
756	.get_rptr = si_dma_ring_get_rptr,
757	.get_wptr = si_dma_ring_get_wptr,
758	.set_wptr = si_dma_ring_set_wptr,
759	.emit_frame_size =
760		3 + 3 + /* hdp flush / invalidate */
761		6 + /* si_dma_ring_emit_pipeline_sync */
762		SI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* si_dma_ring_emit_vm_flush */
763		9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */
764	.emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */
765	.emit_ib = si_dma_ring_emit_ib,
766	.emit_fence = si_dma_ring_emit_fence,
767	.emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
768	.emit_vm_flush = si_dma_ring_emit_vm_flush,
769	.test_ring = si_dma_ring_test_ring,
770	.test_ib = si_dma_ring_test_ib,
771	.insert_nop = amdgpu_ring_insert_nop,
772	.pad_ib = si_dma_ring_pad_ib,
773	.emit_wreg = si_dma_ring_emit_wreg,
774};
775
776static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
777{
778	int i;
779
780	for (i = 0; i < adev->sdma.num_instances; i++)
781		adev->sdma.instance[i].ring.funcs = &si_dma_ring_funcs;
782}
783
784static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = {
785	.set = si_dma_set_trap_irq_state,
786	.process = si_dma_process_trap_irq,
787};
788
789static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs_1 = {
790	.set = si_dma_set_trap_irq_state,
791	.process = si_dma_process_trap_irq_1,
792};
793
794static const struct amdgpu_irq_src_funcs si_dma_illegal_inst_irq_funcs = {
795	.process = si_dma_process_illegal_inst_irq,
796};
797
798static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
799{
800	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
801	adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs;
802	adev->sdma.trap_irq_1.funcs = &si_dma_trap_irq_funcs_1;
803	adev->sdma.illegal_inst_irq.funcs = &si_dma_illegal_inst_irq_funcs;
804}
805
806/**
807 * si_dma_emit_copy_buffer - copy buffer using the sDMA engine
808 *
809 * @ib: indirect buffer to copy to
810 * @src_offset: src GPU address
811 * @dst_offset: dst GPU address
812 * @byte_count: number of bytes to xfer
813 *
814 * Copy GPU buffers using the DMA engine (SI).
815 * Used by the amdgpu ttm implementation to move pages if
816 * registered as the asic copy callback.
817 */
818static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib,
819				       uint64_t src_offset,
820				       uint64_t dst_offset,
821				       uint32_t byte_count)
822{
823	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
824					      1, 0, 0, byte_count);
825	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
826	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
827	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) & 0xff;
828	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset) & 0xff;
829}
830
831/**
832 * si_dma_emit_fill_buffer - fill buffer using the sDMA engine
833 *
834 * @ib: indirect buffer to fill
835 * @src_data: value to write to buffer
836 * @dst_offset: dst GPU address
837 * @byte_count: number of bytes to xfer
838 *
839 * Fill GPU buffers using the DMA engine (SI).
840 */
841static void si_dma_emit_fill_buffer(struct amdgpu_ib *ib,
842				       uint32_t src_data,
843				       uint64_t dst_offset,
844				       uint32_t byte_count)
845{
846	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_CONSTANT_FILL,
847					      0, 0, 0, byte_count / 4);
848	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
849	ib->ptr[ib->length_dw++] = src_data;
850	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) << 16;
851}
852
853
854static const struct amdgpu_buffer_funcs si_dma_buffer_funcs = {
855	.copy_max_bytes = 0xffff8,
856	.copy_num_dw = 5,
857	.emit_copy_buffer = si_dma_emit_copy_buffer,
858
859	.fill_max_bytes = 0xffff8,
860	.fill_num_dw = 4,
861	.emit_fill_buffer = si_dma_emit_fill_buffer,
862};
863
864static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
865{
866	if (adev->mman.buffer_funcs == NULL) {
867		adev->mman.buffer_funcs = &si_dma_buffer_funcs;
868		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
869	}
870}
871
872static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
873	.copy_pte_num_dw = 5,
874	.copy_pte = si_dma_vm_copy_pte,
875
876	.write_pte = si_dma_vm_write_pte,
877	.set_pte_pde = si_dma_vm_set_pte_pde,
878};
879
880static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
881{
882	unsigned i;
883
884	if (adev->vm_manager.vm_pte_funcs == NULL) {
885		adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
886		for (i = 0; i < adev->sdma.num_instances; i++)
887			adev->vm_manager.vm_pte_rings[i] =
888				&adev->sdma.instance[i].ring;
889
890		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
891	}
892}
893
894const struct amdgpu_ip_block_version si_dma_ip_block =
895{
896	.type = AMD_IP_BLOCK_TYPE_SDMA,
897	.major = 1,
898	.minor = 0,
899	.rev = 0,
900	.funcs = &si_dma_ip_funcs,
901};