/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_trace.h"
#include "sid.h"

u32 si_gpu_check_soft_reset(struct radeon_device *rdev);

/**
 * si_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = si_gpu_check_soft_reset(rdev);
	u32 mask;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		mask = RADEON_RESET_DMA;
	else
		mask = RADEON_RESET_DMA1;

	if (!(reset_mask & mask)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}
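
/*
 * Illustrative sketch, not part of this file: si_dma_is_lockup() is
 * normally reached through the asic ring callback table rather than
 * called directly.  A hypothetical caller could look like this:
 *
 *	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
 *
 *	if (si_dma_is_lockup(rdev, ring)) {
 *		DRM_ERROR("DMA ring %d appears hung\n", ring->idx);
 *		radeon_gpu_reset(rdev);
 *	}
 */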

/**
 * si_dma_vm_set_page - update the page tables using the DMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using the DMA (SI).
 */
void si_dma_vm_set_page(struct radeon_device *rdev,
			struct radeon_ib *ib,
			uint64_t pe,
			uint64_t addr, unsigned count,
			uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	trace_radeon_vm_set_page(pe, addr, count, incr, flags);

	if (flags & R600_PTE_SYSTEM) {
		while (count) {
			ndw = count * 2;
			if (ndw > 0xFFFFE)
				ndw = 0xFFFFE;

			/* for non-physically contiguous pages (system) */
			ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
			ib->ptr[ib->length_dw++] = pe;
			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
			for (; ndw > 0; ndw -= 2, --count, pe += 8) {
				value = radeon_vm_map_gart(rdev, addr);
				value &= 0xFFFFFFFFFFFFF000ULL;
				addr += incr;
				value |= flags;
				ib->ptr[ib->length_dw++] = value;
				ib->ptr[ib->length_dw++] = upper_32_bits(value);
			}
		}
	} else {
		while (count) {
			ndw = count * 2;
			if (ndw > 0xFFFFE)
				ndw = 0xFFFFE;

			if (flags & R600_PTE_VALID)
				value = addr;
			else
				value = 0;
			/* for physically contiguous pages (vram) */
			ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
			ib->ptr[ib->length_dw++] = pe; /* dst addr */
			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
			ib->ptr[ib->length_dw++] = flags; /* mask */
			ib->ptr[ib->length_dw++] = 0;
			ib->ptr[ib->length_dw++] = value; /* value */
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
			ib->ptr[ib->length_dw++] = incr; /* increment size */
			ib->ptr[ib->length_dw++] = 0;
			pe += ndw * 4;
			addr += (ndw / 2) * incr;
			count -= ndw / 2;
		}
	}
	/* pad the IB to a multiple of 8 dwords with NOPs */
	while (ib->length_dw & 0x7)
		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
}
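
/*
 * Worked example (illustrative): for 3 contiguous VRAM entries with
 * incr = 4096, the loop above computes ndw = 3 * 2 = 6, far below the
 * 0xFFFFE per-packet cap, so a single 9-dword PTE_PDE packet is
 * emitted; pe then advances by ndw * 4 = 24 bytes (3 entries of
 * 8 bytes each), addr by (ndw / 2) * incr = 3 * 4096 bytes, and
 * count drops to zero.
 */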
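
/**
 * si_dma_vm_flush - flush the VM TLB via the DMA ring
 *
 * @rdev: radeon_device pointer
 * @ridx: index of the ring to emit the flush on
 * @vm: radeon_vm pointer
 *
 * Update the page table base address for the requested VM id and
 * ask the hardware to invalidate that context's TLB entries, using
 * the async DMA engine (SI).
 */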
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
	struct radeon_ring *ring = &rdev->ring[ridx];

	if (vm == NULL)
		return;

	/* write the page table base address for this vm id */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	if (vm->id < 8) {
		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
	} else {
		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
	}
	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);

	/* flush hdp cache */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
	radeon_ring_write(ring, 1);

	/* bits 0-7 are the VM contexts 0-7 */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
	radeon_ring_write(ring, 1 << vm->id);
}
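
/*
 * Illustrative note: each register write above is a three-dword
 * SRBM_WRITE sequence on the DMA ring, roughly:
 *
 *	DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)	packet header
 *	(0xf << 16) | (reg >> 2)			dword offset of reg,
 *							0xf presumably a byte-enable mask
 *	value						data to write
 *
 * where reg stands for whichever register is being written.
 */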

/**
 * si_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU pages using the DMA engine (SI).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 * Returns 0 on success, a negative error code otherwise.
 */
int si_copy_dma(struct radeon_device *rdev,
		uint64_t src_offset, uint64_t dst_offset,
		unsigned num_gpu_pages,
		struct radeon_fence **fence)
{
	struct radeon_semaphore *sem = NULL;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_bytes, cur_size_in_bytes;
	int i, num_loops;
	int r = 0;

	r = radeon_semaphore_create(rdev, &sem);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}

	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
	num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	radeon_semaphore_sync_to(sem, *fence);
	radeon_semaphore_sync_rings(rdev, sem, ring->idx);

	/* emit one COPY packet per chunk of at most 0xFFFFF bytes */
	for (i = 0; i < num_loops; i++) {
		cur_size_in_bytes = size_in_bytes;
		if (cur_size_in_bytes > 0xFFFFF)
			cur_size_in_bytes = 0xFFFFF;
		size_in_bytes -= cur_size_in_bytes;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
		radeon_ring_write(ring, dst_offset & 0xffffffff);
		radeon_ring_write(ring, src_offset & 0xffffffff);
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
	}

	r = radeon_fence_emit(rdev, fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	radeon_ring_unlock_commit(rdev, ring);
	radeon_semaphore_free(rdev, &sem, *fence);

	return r;
}
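
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * src_gpu_addr, dst_gpu_addr and num_pages are assumed inputs.  In the
 * driver this entry point is reached through the asic copy callback
 * from the radeon ttm code rather than called directly.
 *
 *	struct radeon_fence *fence = NULL;
 *	int r;
 *
 *	r = si_copy_dma(rdev, src_gpu_addr, dst_gpu_addr, num_pages, &fence);
 *	if (!r)
 *		r = radeon_fence_wait(fence, false);
 *	radeon_fence_unref(&fence);
 */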